2024-11-25 17:07:47,643 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-25 17:07:47,698 main DEBUG Took 0.032431 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-25 17:07:47,699 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-25 17:07:47,699 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-25 17:07:47,701 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-25 17:07:47,704 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,722 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-25 17:07:47,751 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,758 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,759 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,759 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,760 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,760 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,763 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,764 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,767 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,769 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,769 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,772 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,772 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-25 17:07:47,773 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,774 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,776 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,777 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,779 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,780 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,780 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,782 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,783 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,786 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 17:07:47,786 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,787 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-25 17:07:47,788 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 17:07:47,790 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-25 17:07:47,792 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-25 17:07:47,793 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-25 17:07:47,794 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-25 17:07:47,795 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-25 17:07:47,806 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-25 17:07:47,809 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-25 17:07:47,819 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-25 17:07:47,820 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-25 17:07:47,820 main DEBUG createAppenders(={Console}) 2024-11-25 17:07:47,821 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-25 17:07:47,822 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-25 17:07:47,823 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-25 17:07:47,824 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-25 17:07:47,824 main DEBUG OutputStream closed 2024-11-25 17:07:47,825 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-25 17:07:47,825 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-25 17:07:47,826 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-25 17:07:47,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-25 17:07:47,967 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-25 17:07:47,969 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-25 17:07:47,970 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-25 17:07:47,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-25 17:07:47,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-25 17:07:47,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-25 17:07:47,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-25 17:07:47,977 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-25 17:07:47,977 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-25 17:07:47,978 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-25 17:07:47,978 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-25 17:07:47,979 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-25 17:07:47,979 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-25 17:07:47,979 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-25 17:07:47,980 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-25 17:07:47,980 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-25 17:07:47,981 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-25 17:07:47,985 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-25 17:07:47,985 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-25 17:07:47,986 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-25 17:07:47,987 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-25T17:07:48,540 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208 2024-11-25 17:07:48,543 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-25 17:07:48,544 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-25T17:07:48,564 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-25T17:07:48,617 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T17:07:48,622 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55, deleteOnExit=true 2024-11-25T17:07:48,623 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-25T17:07:48,625 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/test.cache.data in system properties and HBase conf 2024-11-25T17:07:48,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T17:07:48,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/hadoop.log.dir in system properties and HBase conf 2024-11-25T17:07:48,629 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T17:07:48,630 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T17:07:48,631 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-25T17:07:48,841 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-25T17:07:48,994 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T17:07:49,000 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T17:07:49,001 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T17:07:49,006 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T17:07:49,006 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T17:07:49,007 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T17:07:49,007 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T17:07:49,009 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T17:07:49,010 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T17:07:49,010 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T17:07:49,011 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/nfs.dump.dir in system properties and HBase conf 2024-11-25T17:07:49,011 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/java.io.tmpdir in system properties and HBase conf 2024-11-25T17:07:49,012 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T17:07:49,013 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T17:07:49,013 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T17:07:50,223 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-25T17:07:50,347 INFO [Time-limited test {}] log.Log(170): Logging initialized @4010ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-25T17:07:50,450 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T17:07:50,538 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T17:07:50,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T17:07:50,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T17:07:50,572 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T17:07:50,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T17:07:50,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/hadoop.log.dir/,AVAILABLE} 2024-11-25T17:07:50,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T17:07:50,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/java.io.tmpdir/jetty-localhost-42475-hadoop-hdfs-3_4_1-tests_jar-_-any-11738548742374302022/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T17:07:50,877 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:42475} 2024-11-25T17:07:50,877 INFO [Time-limited test {}] server.Server(415): Started @4541ms 2024-11-25T17:07:51,465 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T17:07:51,476 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T17:07:51,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T17:07:51,478 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T17:07:51,479 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T17:07:51,480 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/hadoop.log.dir/,AVAILABLE} 2024-11-25T17:07:51,481 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T17:07:51,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10ba49e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/java.io.tmpdir/jetty-localhost-45385-hadoop-hdfs-3_4_1-tests_jar-_-any-6468224349546104490/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T17:07:51,630 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:45385} 2024-11-25T17:07:51,631 INFO [Time-limited test {}] server.Server(415): Started @5295ms 2024-11-25T17:07:51,712 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T17:07:52,473 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/dfs/data/data1/current/BP-896136284-172.17.0.3-1732554469894/current, will proceed with Du for space computation calculation, 2024-11-25T17:07:52,473 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/dfs/data/data2/current/BP-896136284-172.17.0.3-1732554469894/current, will proceed with Du for space computation calculation, 2024-11-25T17:07:52,531 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T17:07:52,598 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9dde328db004a1b with lease ID 0x2c619802ae038071: Processing first storage report for DS-3adc48ab-6b9c-479d-b63c-4b48a956193e from datanode DatanodeRegistration(127.0.0.1:33067, datanodeUuid=2e3cadf8-29ef-4329-ac23-cf3711913468, infoPort=39689, infoSecurePort=0, ipcPort=33367, storageInfo=lv=-57;cid=testClusterID;nsid=1426133524;c=1732554469894) 2024-11-25T17:07:52,599 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9dde328db004a1b with lease ID 0x2c619802ae038071: from storage DS-3adc48ab-6b9c-479d-b63c-4b48a956193e node DatanodeRegistration(127.0.0.1:33067, datanodeUuid=2e3cadf8-29ef-4329-ac23-cf3711913468, infoPort=39689, infoSecurePort=0, ipcPort=33367, storageInfo=lv=-57;cid=testClusterID;nsid=1426133524;c=1732554469894), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-25T17:07:52,600 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9dde328db004a1b with lease ID 0x2c619802ae038071: Processing first storage report for DS-0ecb2776-1bff-4462-b6a7-689fbf2c30bd from datanode DatanodeRegistration(127.0.0.1:33067, datanodeUuid=2e3cadf8-29ef-4329-ac23-cf3711913468, infoPort=39689, infoSecurePort=0, ipcPort=33367, storageInfo=lv=-57;cid=testClusterID;nsid=1426133524;c=1732554469894) 2024-11-25T17:07:52,600 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9dde328db004a1b with lease ID 0x2c619802ae038071: from storage DS-0ecb2776-1bff-4462-b6a7-689fbf2c30bd node DatanodeRegistration(127.0.0.1:33067, datanodeUuid=2e3cadf8-29ef-4329-ac23-cf3711913468, infoPort=39689, infoSecurePort=0, ipcPort=33367, storageInfo=lv=-57;cid=testClusterID;nsid=1426133524;c=1732554469894), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T17:07:52,630 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208 
2024-11-25T17:07:52,738 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/zookeeper_0, clientPort=56265, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T17:07:52,751 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56265 2024-11-25T17:07:52,762 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T17:07:52,764 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T17:07:53,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741825_1001 (size=7) 2024-11-25T17:07:53,537 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 with version=8 2024-11-25T17:07:53,538 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/hbase-staging 2024-11-25T17:07:53,676 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-25T17:07:53,975 INFO [Time-limited test {}] client.ConnectionUtils(129): master/6579369734b6:0 server-side Connection retries=45 2024-11-25T17:07:53,994 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T17:07:53,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T17:07:53,995 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T17:07:53,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T17:07:53,995 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T17:07:54,135 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T17:07:54,197 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-25T17:07:54,206 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-25T17:07:54,209 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T17:07:54,236 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 1042 (auto-detected) 2024-11-25T17:07:54,237 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-25T17:07:54,256 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:33083 2024-11-25T17:07:54,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T17:07:54,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T17:07:54,278 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33083 connecting to ZooKeeper ensemble=127.0.0.1:56265 2024-11-25T17:07:54,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:330830x0, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T17:07:54,321 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33083-0x1012ade31b40000 connected 2024-11-25T17:07:54,356 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T17:07:54,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T17:07:54,363 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T17:07:54,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33083 2024-11-25T17:07:54,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33083 2024-11-25T17:07:54,371 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33083 2024-11-25T17:07:54,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33083 2024-11-25T17:07:54,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33083 
2024-11-25T17:07:54,379 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4, hbase.cluster.distributed=false 2024-11-25T17:07:54,465 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6579369734b6:0 server-side Connection retries=45 2024-11-25T17:07:54,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T17:07:54,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T17:07:54,466 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T17:07:54,466 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T17:07:54,467 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T17:07:54,471 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T17:07:54,474 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T17:07:54,477 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:41865 2024-11-25T17:07:54,479 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T17:07:54,487 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T17:07:54,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T17:07:54,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T17:07:54,499 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41865 connecting to ZooKeeper ensemble=127.0.0.1:56265 2024-11-25T17:07:54,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:418650x0, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T17:07:54,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418650x0, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T17:07:54,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41865-0x1012ade31b40001 connected 2024-11-25T17:07:54,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T17:07:54,508 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T17:07:54,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41865 2024-11-25T17:07:54,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41865 2024-11-25T17:07:54,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41865 2024-11-25T17:07:54,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41865 2024-11-25T17:07:54,523 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41865 2024-11-25T17:07:54,526 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/6579369734b6,33083,1732554473669 2024-11-25T17:07:54,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T17:07:54,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T17:07:54,540 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6579369734b6,33083,1732554473669 2024-11-25T17:07:54,548 DEBUG [M:0;6579369734b6:33083 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;6579369734b6:33083 2024-11-25T17:07:54,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T17:07:54,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T17:07:54,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:54,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:54,577 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T17:07:54,575 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T17:07:54,579 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6579369734b6,33083,1732554473669 from backup master directory 2024-11-25T17:07:54,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6579369734b6,33083,1732554473669 2024-11-25T17:07:54,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T17:07:54,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T17:07:54,584 WARN [master/6579369734b6:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T17:07:54,584 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6579369734b6,33083,1732554473669 2024-11-25T17:07:54,587 INFO [master/6579369734b6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-25T17:07:54,589 INFO [master/6579369734b6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-25T17:07:54,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741826_1002 (size=42) 2024-11-25T17:07:54,709 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/hbase.id with ID: db2642be-f7da-45ed-880a-e767e554f8b7 2024-11-25T17:07:54,765 INFO [master/6579369734b6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T17:07:54,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:54,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:54,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741827_1003 (size=196) 2024-11-25T17:07:54,839 INFO [master/6579369734b6:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:07:54,842 INFO [master/6579369734b6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T17:07:54,863 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:07:54,869 INFO [master/6579369734b6:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T17:07:54,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741828_1004 (size=1189) 2024-11-25T17:07:55,329 INFO [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store 2024-11-25T17:07:55,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741829_1005 (size=34) 2024-11-25T17:07:55,750 INFO [master/6579369734b6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-25T17:07:55,751 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:07:55,752 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T17:07:55,752 INFO [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T17:07:55,752 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T17:07:55,752 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T17:07:55,752 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T17:07:55,753 INFO [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T17:07:55,753 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-25T17:07:55,755 WARN [master/6579369734b6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/.initializing 2024-11-25T17:07:55,755 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/WALs/6579369734b6,33083,1732554473669 2024-11-25T17:07:55,761 INFO [master/6579369734b6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-25T17:07:55,772 INFO [master/6579369734b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6579369734b6%2C33083%2C1732554473669, suffix=, logDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/WALs/6579369734b6,33083,1732554473669, archiveDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/oldWALs, maxLogs=10 2024-11-25T17:07:55,794 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/WALs/6579369734b6,33083,1732554473669/6579369734b6%2C33083%2C1732554473669.1732554475777, exclude list is [], retry=0 2024-11-25T17:07:55,809 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33067,DS-3adc48ab-6b9c-479d-b63c-4b48a956193e,DISK] 2024-11-25T17:07:55,812 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-25T17:07:55,847 INFO [master/6579369734b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/WALs/6579369734b6,33083,1732554473669/6579369734b6%2C33083%2C1732554473669.1732554475777 2024-11-25T17:07:55,848 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689)] 2024-11-25T17:07:55,849 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:07:55,849 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:07:55,854 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,855 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,900 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T17:07:55,935 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:55,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T17:07:55,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T17:07:55,945 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:55,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:07:55,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,950 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T17:07:55,950 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:55,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:07:55,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T17:07:55,955 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:55,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:07:55,960 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,963 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,977 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T17:07:55,981 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T17:07:55,987 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:07:55,988 INFO [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71259250, jitterRate=0.0618455708026886}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T17:07:55,994 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-25T17:07:55,995 INFO [master/6579369734b6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T17:07:56,033 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70a548d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:07:56,079 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
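The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the master:store descriptor, so the flusher falls back to the memstore flush size divided by the number of families (32.0 M here). A minimal sketch of setting that property on a table descriptor; the table name "demo" and the 32 MB value are illustrative, while the property name comes from the log message itself:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // Property name taken from the FlushLargeStoresPolicy message above;
            // table name and value are placeholders for illustration.
            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("demo"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                              String.valueOf(32L * 1024 * 1024))
                    .build();
            System.out.println(td);
        }
    }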
2024-11-25T17:07:56,092 INFO [master/6579369734b6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T17:07:56,092 INFO [master/6579369734b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T17:07:56,094 INFO [master/6579369734b6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T17:07:56,096 INFO [master/6579369734b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-25T17:07:56,101 INFO [master/6579369734b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-25T17:07:56,101 INFO [master/6579369734b6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T17:07:56,132 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T17:07:56,147 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T17:07:56,151 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-25T17:07:56,153 INFO [master/6579369734b6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T17:07:56,155 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T17:07:56,157 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-25T17:07:56,159 INFO [master/6579369734b6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T17:07:56,167 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T17:07:56,169 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-25T17:07:56,170 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T17:07:56,172 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T17:07:56,182 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T17:07:56,184 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T17:07:56,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T17:07:56,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T17:07:56,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:56,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:56,190 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=6579369734b6,33083,1732554473669, sessionid=0x1012ade31b40000, setting cluster-up flag (Was=false) 2024-11-25T17:07:56,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:56,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:56,210 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T17:07:56,211 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6579369734b6,33083,1732554473669 2024-11-25T17:07:56,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:56,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:56,227 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T17:07:56,229 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6579369734b6,33083,1732554473669 2024-11-25T17:07:56,330 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-25T17:07:56,336 INFO [master/6579369734b6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-25T17:07:56,339 INFO [master/6579369734b6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T17:07:56,345 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6579369734b6,33083,1732554473669 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T17:07:56,348 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6579369734b6:0, corePoolSize=5, maxPoolSize=5 2024-11-25T17:07:56,349 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6579369734b6:0, corePoolSize=5, maxPoolSize=5 2024-11-25T17:07:56,349 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6579369734b6:41865 2024-11-25T17:07:56,349 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6579369734b6:0, corePoolSize=5, maxPoolSize=5 2024-11-25T17:07:56,349 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6579369734b6:0, corePoolSize=5, maxPoolSize=5 2024-11-25T17:07:56,349 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6579369734b6:0, corePoolSize=10, maxPoolSize=10 2024-11-25T17:07:56,350 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,350 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6579369734b6:0, corePoolSize=2, maxPoolSize=2 2024-11-25T17:07:56,350 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,350 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1008): ClusterId : db2642be-f7da-45ed-880a-e767e554f8b7 2024-11-25T17:07:56,353 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T17:07:56,354 INFO [master/6579369734b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1732554506354 2024-11-25T17:07:56,356 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-25T17:07:56,356 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T17:07:56,356 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-25T17:07:56,357 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T17:07:56,361 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T17:07:56,361 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:56,361 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T17:07:56,361 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T17:07:56,362 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T17:07:56,362 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T17:07:56,366 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
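The FSTableDescriptors entry above shows the hbase:meta descriptor being written with the info, rep_barrier and table families. Once the cluster is serving, a similar view can be fetched through the client API; a minimal sketch, assuming an hbase-site.xml on the classpath that points at this cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaDescriptorSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
                for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
                    // Expect info, rep_barrier and table, as in the descriptor logged above.
                    System.out.println(cf.getNameAsString() + " blocksize=" + cf.getBlocksize());
                }
            }
        }
    }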
2024-11-25T17:07:56,368 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T17:07:56,370 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T17:07:56,370 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T17:07:56,372 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T17:07:56,372 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T17:07:56,372 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T17:07:56,373 INFO [master/6579369734b6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T17:07:56,375 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6579369734b6:0:becomeActiveMaster-HFileCleaner.large.0-1732554476374,5,FailOnTimeoutGroup] 2024-11-25T17:07:56,375 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T17:07:56,376 DEBUG [RS:0;6579369734b6:41865 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63f6ee2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:07:56,377 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6579369734b6:0:becomeActiveMaster-HFileCleaner.small.0-1732554476375,5,FailOnTimeoutGroup] 2024-11-25T17:07:56,377 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,378 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T17:07:56,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741831_1007 (size=1039) 2024-11-25T17:07:56,379 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,379 DEBUG [RS:0;6579369734b6:41865 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37d6cae0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6579369734b6/172.17.0.3:0 2024-11-25T17:07:56,379 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
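The LogsCleaner and HFileCleaner chores above both run with period=600000 ms. A one-line sketch of tuning that interval, assuming the property name hbase.master.cleaner.interval (an assumption from common HBase configuration, not something this log states):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerIntervalSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property name; 600000 ms matches the chore period reported above.
            conf.setInt("hbase.master.cleaner.interval", 600_000);
            System.out.println(conf.getInt("hbase.master.cleaner.interval", -1));
        }
    }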
2024-11-25T17:07:56,382 INFO [RS:0;6579369734b6:41865 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-25T17:07:56,382 INFO [RS:0;6579369734b6:41865 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-25T17:07:56,383 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-25T17:07:56,388 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(3073): reportForDuty to master=6579369734b6,33083,1732554473669 with isa=6579369734b6/172.17.0.3:41865, startcode=1732554474464 2024-11-25T17:07:56,401 DEBUG [RS:0;6579369734b6:41865 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T17:07:56,442 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57919, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T17:07:56,448 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33083 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6579369734b6,41865,1732554474464 2024-11-25T17:07:56,451 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33083 {}] master.ServerManager(486): Registering regionserver=6579369734b6,41865,1732554474464 2024-11-25T17:07:56,466 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:07:56,467 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41117 2024-11-25T17:07:56,467 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-25T17:07:56,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T17:07:56,472 DEBUG [RS:0;6579369734b6:41865 {}] zookeeper.ZKUtil(111): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6579369734b6,41865,1732554474464 2024-11-25T17:07:56,472 WARN [RS:0;6579369734b6:41865 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
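The ZKUtil entry above shows the region server's ephemeral registration znode under /hbase/rs. A minimal sketch of listing those registrations directly with the ZooKeeper client; the quorum address below is illustrative (the test above uses 127.0.0.1:56265 with baseZNode=/hbase):

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServersSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
            try {
                // Children are host,port,startcode entries such as 6579369734b6,41865,1732554474464.
                List<String> servers = zk.getChildren("/hbase/rs", false);
                servers.forEach(System.out::println);
            } finally {
                zk.close();
            }
        }
    }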
2024-11-25T17:07:56,472 INFO [RS:0;6579369734b6:41865 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T17:07:56,473 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464 2024-11-25T17:07:56,475 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6579369734b6,41865,1732554474464] 2024-11-25T17:07:56,488 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-25T17:07:56,502 INFO [RS:0;6579369734b6:41865 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T17:07:56,516 INFO [RS:0;6579369734b6:41865 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T17:07:56,520 INFO [RS:0;6579369734b6:41865 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T17:07:56,520 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,521 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-25T17:07:56,528 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
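The entries above report globalMemStoreLimit=880 M with a low-water mark of 836 M, and compaction throughput bounds of 100 MB/s (upper) and 50 MB/s (lower). A rough sketch of the knobs usually behind those numbers; the property names are assumptions from memory, not values printed in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndCompactionSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property names, for illustration only.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);                      // heap fraction for memstores
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // ~100 MB/s upper bound
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // ~50 MB/s lower bound
            System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
        }
    }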
2024-11-25T17:07:56,528 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,529 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,529 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,529 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,529 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,529 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6579369734b6:0, corePoolSize=2, maxPoolSize=2 2024-11-25T17:07:56,529 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,529 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,530 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,533 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,534 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6579369734b6:0, corePoolSize=1, maxPoolSize=1 2024-11-25T17:07:56,534 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6579369734b6:0, corePoolSize=3, maxPoolSize=3 2024-11-25T17:07:56,534 DEBUG [RS:0;6579369734b6:41865 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0, corePoolSize=3, maxPoolSize=3 2024-11-25T17:07:56,539 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,539 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,540 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,540 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,540 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,41865,1732554474464-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-25T17:07:56,567 INFO [RS:0;6579369734b6:41865 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T17:07:56,570 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,41865,1732554474464-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:56,607 INFO [RS:0;6579369734b6:41865 {}] regionserver.Replication(204): 6579369734b6,41865,1732554474464 started 2024-11-25T17:07:56,607 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1767): Serving as 6579369734b6,41865,1732554474464, RpcServer on 6579369734b6/172.17.0.3:41865, sessionid=0x1012ade31b40001 2024-11-25T17:07:56,608 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T17:07:56,608 DEBUG [RS:0;6579369734b6:41865 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6579369734b6,41865,1732554474464 2024-11-25T17:07:56,608 DEBUG [RS:0;6579369734b6:41865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6579369734b6,41865,1732554474464' 2024-11-25T17:07:56,608 DEBUG [RS:0;6579369734b6:41865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T17:07:56,611 DEBUG [RS:0;6579369734b6:41865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T17:07:56,612 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T17:07:56,612 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T17:07:56,612 DEBUG [RS:0;6579369734b6:41865 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6579369734b6,41865,1732554474464 2024-11-25T17:07:56,612 DEBUG [RS:0;6579369734b6:41865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6579369734b6,41865,1732554474464' 2024-11-25T17:07:56,612 DEBUG [RS:0;6579369734b6:41865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T17:07:56,613 DEBUG [RS:0;6579369734b6:41865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T17:07:56,618 DEBUG [RS:0;6579369734b6:41865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T17:07:56,619 INFO [RS:0;6579369734b6:41865 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T17:07:56,619 INFO [RS:0;6579369734b6:41865 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
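The quota managers above are skipped because quota support is disabled in this test. A minimal sketch of the opposite setting, assuming the property name hbase.quota.enabled (an assumption; the log only reports that the feature is off):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property name, shown only to illustrate the toggle behind
            // the "Quota support disabled" messages above.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println(conf.getBoolean("hbase.quota.enabled", false));
        }
    }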
2024-11-25T17:07:56,726 INFO [RS:0;6579369734b6:41865 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-25T17:07:56,733 INFO [RS:0;6579369734b6:41865 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6579369734b6%2C41865%2C1732554474464, suffix=, logDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464, archiveDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/oldWALs, maxLogs=32 2024-11-25T17:07:56,759 DEBUG [RS:0;6579369734b6:41865 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464/6579369734b6%2C41865%2C1732554474464.1732554476736, exclude list is [], retry=0 2024-11-25T17:07:56,765 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33067,DS-3adc48ab-6b9c-479d-b63c-4b48a956193e,DISK] 2024-11-25T17:07:56,770 INFO [RS:0;6579369734b6:41865 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464/6579369734b6%2C41865%2C1732554474464.1732554476736 2024-11-25T17:07:56,772 DEBUG [RS:0;6579369734b6:41865 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39689:39689)] 2024-11-25T17:07:56,780 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-25T17:07:56,780 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:07:56,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741833_1009 (size=32) 2024-11-25T17:07:57,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:07:57,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T17:07:57,202 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T17:07:57,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:57,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T17:07:57,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T17:07:57,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T17:07:57,207 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:57,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T17:07:57,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T17:07:57,211 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T17:07:57,211 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:57,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T17:07:57,214 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740 2024-11-25T17:07:57,214 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740 2024-11-25T17:07:57,218 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T17:07:57,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-25T17:07:57,225 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:07:57,226 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67535244, jitterRate=0.006353557109832764}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:07:57,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-25T17:07:57,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-25T17:07:57,229 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-25T17:07:57,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-25T17:07:57,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T17:07:57,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T17:07:57,230 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-25T17:07:57,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-25T17:07:57,233 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-25T17:07:57,233 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-25T17:07:57,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T17:07:57,250 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T17:07:57,252 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T17:07:57,404 DEBUG [6579369734b6:33083 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T17:07:57,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:07:57,414 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6579369734b6,41865,1732554474464, state=OPENING 2024-11-25T17:07:57,419 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T17:07:57,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:57,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:57,422 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T17:07:57,422 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T17:07:57,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:07:57,602 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:07:57,604 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T17:07:57,608 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T17:07:57,622 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-25T17:07:57,623 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-25T17:07:57,623 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-25T17:07:57,627 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6579369734b6%2C41865%2C1732554474464.meta, suffix=.meta, logDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464, archiveDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/oldWALs, maxLogs=32 2024-11-25T17:07:57,650 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464/6579369734b6%2C41865%2C1732554474464.meta.1732554477629.meta, exclude list is [], retry=0 2024-11-25T17:07:57,656 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33067,DS-3adc48ab-6b9c-479d-b63c-4b48a956193e,DISK] 2024-11-25T17:07:57,661 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464/6579369734b6%2C41865%2C1732554474464.meta.1732554477629.meta 2024-11-25T17:07:57,662 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:39689:39689)] 2024-11-25T17:07:57,662 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:07:57,664 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T17:07:57,725 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T17:07:57,730 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-25T17:07:57,735 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T17:07:57,735 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:07:57,735 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-25T17:07:57,735 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-25T17:07:57,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T17:07:57,741 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T17:07:57,741 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:57,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T17:07:57,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T17:07:57,745 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T17:07:57,745 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:57,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T17:07:57,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T17:07:57,748 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T17:07:57,748 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:57,750 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T17:07:57,751 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740 2024-11-25T17:07:57,754 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740 2024-11-25T17:07:57,756 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:07:57,759 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-25T17:07:57,761 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74913098, jitterRate=0.11629214882850647}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:07:57,763 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-25T17:07:57,770 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732554477595 2024-11-25T17:07:57,781 DEBUG [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T17:07:57,782 INFO [RS_OPEN_META-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-25T17:07:57,783 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:07:57,785 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6579369734b6,41865,1732554474464, state=OPEN 2024-11-25T17:07:57,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T17:07:57,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T17:07:57,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T17:07:57,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T17:07:57,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T17:07:57,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=6579369734b6,41865,1732554474464 in 365 msec 2024-11-25T17:07:57,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T17:07:57,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 556 msec 2024-11-25T17:07:57,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5350 sec 2024-11-25T17:07:57,805 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732554477805, completionTime=-1 2024-11-25T17:07:57,805 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T17:07:57,805 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-25T17:07:57,842 DEBUG [hconnection-0x327ba634-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:07:57,845 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:07:57,855 INFO [master/6579369734b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-25T17:07:57,855 INFO [master/6579369734b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732554537855 2024-11-25T17:07:57,855 INFO [master/6579369734b6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732554597855 2024-11-25T17:07:57,856 INFO [master/6579369734b6:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 50 msec 2024-11-25T17:07:57,877 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,33083,1732554473669-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:57,878 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,33083,1732554473669-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:57,878 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,33083,1732554473669-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:57,879 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6579369734b6:33083, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:57,879 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T17:07:57,885 DEBUG [master/6579369734b6:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-25T17:07:57,889 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-25T17:07:57,890 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T17:07:57,896 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-25T17:07:57,899 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T17:07:57,900 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:57,902 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T17:07:57,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741835_1011 (size=358) 2024-11-25T17:07:58,318 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 44c7b6d5dcb77061152173d1606a877a, NAME => 'hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:07:58,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741836_1012 (size=42) 2024-11-25T17:07:58,730 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:07:58,730 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 44c7b6d5dcb77061152173d1606a877a, disabling compactions & flushes 2024-11-25T17:07:58,730 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:07:58,730 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:07:58,730 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 
after waiting 0 ms 2024-11-25T17:07:58,730 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:07:58,730 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:07:58,731 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 44c7b6d5dcb77061152173d1606a877a: 2024-11-25T17:07:58,733 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T17:07:58,762 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732554478734"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732554478734"}]},"ts":"1732554478734"} 2024-11-25T17:07:58,794 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-25T17:07:58,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T17:07:58,799 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554478796"}]},"ts":"1732554478796"} 2024-11-25T17:07:58,805 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-25T17:07:58,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=44c7b6d5dcb77061152173d1606a877a, ASSIGN}] 2024-11-25T17:07:58,814 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=44c7b6d5dcb77061152173d1606a877a, ASSIGN 2024-11-25T17:07:58,816 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=44c7b6d5dcb77061152173d1606a877a, ASSIGN; state=OFFLINE, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=false 2024-11-25T17:07:58,966 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=44c7b6d5dcb77061152173d1606a877a, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:07:58,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 44c7b6d5dcb77061152173d1606a877a, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:07:59,140 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:07:59,153 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:07:59,154 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 44c7b6d5dcb77061152173d1606a877a, NAME => 'hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:07:59,154 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:07:59,155 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:07:59,155 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:07:59,155 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:07:59,159 INFO [StoreOpener-44c7b6d5dcb77061152173d1606a877a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:07:59,166 INFO [StoreOpener-44c7b6d5dcb77061152173d1606a877a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 44c7b6d5dcb77061152173d1606a877a columnFamilyName info 2024-11-25T17:07:59,166 DEBUG [StoreOpener-44c7b6d5dcb77061152173d1606a877a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:59,169 INFO [StoreOpener-44c7b6d5dcb77061152173d1606a877a-1 {}] regionserver.HStore(327): Store=44c7b6d5dcb77061152173d1606a877a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:07:59,175 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:07:59,177 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:07:59,182 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:07:59,189 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:07:59,190 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 44c7b6d5dcb77061152173d1606a877a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60121263, jitterRate=-0.10412336885929108}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T17:07:59,192 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 44c7b6d5dcb77061152173d1606a877a: 2024-11-25T17:07:59,194 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a., pid=6, masterSystemTime=1732554479140 2024-11-25T17:07:59,198 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:07:59,198 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 
2024-11-25T17:07:59,199 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=44c7b6d5dcb77061152173d1606a877a, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:07:59,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T17:07:59,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 44c7b6d5dcb77061152173d1606a877a, server=6579369734b6,41865,1732554474464 in 221 msec 2024-11-25T17:07:59,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T17:07:59,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=44c7b6d5dcb77061152173d1606a877a, ASSIGN in 398 msec 2024-11-25T17:07:59,215 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T17:07:59,216 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554479215"}]},"ts":"1732554479215"} 2024-11-25T17:07:59,219 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-25T17:07:59,224 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T17:07:59,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3330 sec 2024-11-25T17:07:59,301 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-25T17:07:59,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-25T17:07:59,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:59,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:07:59,338 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-25T17:07:59,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-25T17:07:59,360 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 26 msec 2024-11-25T17:07:59,372 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-25T17:07:59,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-25T17:07:59,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-25T17:07:59,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-25T17:07:59,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-25T17:07:59,402 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.818sec 2024-11-25T17:07:59,404 INFO [master/6579369734b6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T17:07:59,405 INFO [master/6579369734b6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T17:07:59,406 INFO [master/6579369734b6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T17:07:59,406 INFO [master/6579369734b6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T17:07:59,407 INFO [master/6579369734b6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T17:07:59,407 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,33083,1732554473669-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T17:07:59,408 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,33083,1732554473669-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T17:07:59,414 DEBUG [master/6579369734b6:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-25T17:07:59,415 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T17:07:59,416 INFO [master/6579369734b6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6579369734b6,33083,1732554473669-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
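Note: the entries above show the master creating the built-in default and hbase namespaces through CreateNamespaceProcedure (pids 7 and 8) during startup. Purely as an illustration of the same operation from the client side (this is not something the logged test does, and the namespace name and Connection are assumed), the equivalent Admin call looks roughly like this:

import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

final class NamespaceExample {
    // Illustrative only: a client-side counterpart of the master's
    // CreateNamespaceProcedure runs seen above for "default" and "hbase".
    static void createDemoNamespace(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
            // "demo_ns" is a made-up namespace name for this sketch.
            admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
        }
    }
}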
2024-11-25T17:07:59,449 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3771e354 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38630296 2024-11-25T17:07:59,450 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-25T17:07:59,457 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6321da62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:07:59,460 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-25T17:07:59,460 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-25T17:07:59,473 DEBUG [hconnection-0x13adb0ff-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:07:59,485 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:07:59,495 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=6579369734b6,33083,1732554473669 2024-11-25T17:07:59,511 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=218, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=582, ProcessCount=11, AvailableMemoryMB=3313 2024-11-25T17:07:59,524 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T17:07:59,526 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T17:07:59,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
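Note: the WARN above flags ZKConnectionRegistry as deprecated and points to the book section on the RPC-based connection registry. As a rough sketch only, a client would opt into the newer registry along these lines; the property and class names below are recalled from that documentation (not from this log) and may differ by HBase version, so verify them against the linked section:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

final class RegistryExample {
    // Sketch only: key and class names are assumptions taken from memory of the
    // referenced book section, not confirmed by this log.
    static Connection connectViaRpcRegistry(String bootstrapServers) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.client.registry.impl",
                 "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        conf.set("hbase.client.bootstrap.servers", bootstrapServers); // e.g. "host1:16020,host2:16020"
        return ConnectionFactory.createConnection(conf);
    }
}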
2024-11-25T17:07:59,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:07:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-25T17:07:59,541 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T17:07:59,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-25T17:07:59,541 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:07:59,543 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T17:07:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-25T17:07:59,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741837_1013 (size=963) 2024-11-25T17:07:59,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-25T17:07:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-25T17:07:59,956 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:07:59,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741838_1014 (size=53) 2024-11-25T17:07:59,966 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:07:59,966 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 140432b4069c8ca485d8f3971c9e31fe, disabling compactions & flushes 2024-11-25T17:07:59,966 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:07:59,966 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:07:59,966 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. after waiting 0 ms 2024-11-25T17:07:59,967 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:07:59,967 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:07:59,967 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:07:59,969 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T17:07:59,969 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732554479969"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732554479969"}]},"ts":"1732554479969"} 2024-11-25T17:07:59,972 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
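Note: the table descriptor dumped above (families A, B, C with VERSIONS => '1', BLOCKSIZE => 64KB, and table metadata hbase.hregion.compacting.memstore.type => 'ADAPTIVE') corresponds roughly to an Admin createTable call like the sketch below. This is a reconstruction from the logged descriptor, not the test's actual code, and the Connection is assumed to be already open. The call blocks until the CreateTableProcedure (pid=9 here) completes, which matches the later "procId: 9 completed" entry.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateTableExample {
    // Approximation reconstructed from the descriptor printed in the log above.
    static void createTestAcidGuarantees(Connection connection) throws IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)         // VERSIONS => '1'
                    .setBlocksize(64 * 1024)   // BLOCKSIZE => '65536 B (64KB)'
                    .build());
        }
        try (Admin admin = connection.getAdmin()) {
            admin.createTable(table.build()); // executes as a CreateTableProcedure on the master
        }
    }
}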
2024-11-25T17:07:59,974 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T17:07:59,974 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554479974"}]},"ts":"1732554479974"} 2024-11-25T17:07:59,976 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-25T17:07:59,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=140432b4069c8ca485d8f3971c9e31fe, ASSIGN}] 2024-11-25T17:07:59,982 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=140432b4069c8ca485d8f3971c9e31fe, ASSIGN 2024-11-25T17:07:59,983 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=140432b4069c8ca485d8f3971c9e31fe, ASSIGN; state=OFFLINE, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=false 2024-11-25T17:08:00,134 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=140432b4069c8ca485d8f3971c9e31fe, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:00,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:08:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-25T17:08:00,292 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:00,301 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:00,302 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:08:00,302 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,302 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:08:00,302 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,303 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,306 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,310 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:00,310 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 140432b4069c8ca485d8f3971c9e31fe columnFamilyName A 2024-11-25T17:08:00,310 DEBUG [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:00,311 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.HStore(327): Store=140432b4069c8ca485d8f3971c9e31fe/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:00,312 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,314 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:00,314 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 140432b4069c8ca485d8f3971c9e31fe columnFamilyName B 2024-11-25T17:08:00,314 DEBUG [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:00,315 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.HStore(327): Store=140432b4069c8ca485d8f3971c9e31fe/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:00,315 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,317 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:00,318 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 140432b4069c8ca485d8f3971c9e31fe columnFamilyName C 2024-11-25T17:08:00,318 DEBUG [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:00,319 INFO [StoreOpener-140432b4069c8ca485d8f3971c9e31fe-1 {}] regionserver.HStore(327): Store=140432b4069c8ca485d8f3971c9e31fe/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:00,319 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:00,321 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,322 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,325 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:08:00,327 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,331 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:08:00,331 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 140432b4069c8ca485d8f3971c9e31fe; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72073883, jitterRate=0.0739845484495163}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:08:00,332 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:00,334 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., pid=11, masterSystemTime=1732554480292 2024-11-25T17:08:00,337 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:00,337 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
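Note: the store openers above instantiate CompactingMemStore with compactor=ADAPTIVE for each family because of the table-level hbase.hregion.compacting.memstore.type metadata in the descriptor. For reference, the same policy can also be requested per column family through the public builder API; whether this test does so is not visible in the log, so treat the following as a sketch:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class InMemoryCompactionExample {
    // Per-family alternative to the table-level
    // 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' metadata seen above.
    static ColumnFamilyDescriptor adaptiveFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
    }
}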
2024-11-25T17:08:00,338 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=140432b4069c8ca485d8f3971c9e31fe, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:00,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-25T17:08:00,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 in 203 msec 2024-11-25T17:08:00,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-25T17:08:00,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=140432b4069c8ca485d8f3971c9e31fe, ASSIGN in 364 msec 2024-11-25T17:08:00,350 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T17:08:00,350 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554480350"}]},"ts":"1732554480350"} 2024-11-25T17:08:00,352 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-25T17:08:00,356 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T17:08:00,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 819 msec 2024-11-25T17:08:00,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-25T17:08:00,661 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-25T17:08:00,668 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24869052 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63607639 2024-11-25T17:08:00,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e67f019, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,675 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,677 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,680 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T17:08:00,683 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46830, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T17:08:00,691 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62c08aa2 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53b8a93e 2024-11-25T17:08:00,696 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5095ba91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,697 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d38d10 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f343a4d 2024-11-25T17:08:00,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,706 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-11-25T17:08:00,714 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c43377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,715 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x736f1673 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@478bae6b 2024-11-25T17:08:00,737 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,738 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-11-25T17:08:00,775 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a8f4734, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,776 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03883f7b to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b5f27aa 2024-11-25T17:08:00,806 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c964e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,807 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b5cad1a to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@295cb1ac 2024-11-25T17:08:00,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72e97e4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,869 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-25T17:08:00,883 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,884 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x767a8485 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d2a8e08 2024-11-25T17:08:00,890 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c8de680, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:00,896 DEBUG [hconnection-0x4360c845-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,897 DEBUG [hconnection-0x2216ae23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,897 DEBUG [hconnection-0x5d4164ec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,900 DEBUG [hconnection-0x53f1d8ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:00,901 DEBUG [hconnection-0x4872f2f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,902 DEBUG [hconnection-0x3d0fdd06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,904 DEBUG [hconnection-0x2dd15ceb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,904 DEBUG [hconnection-0x40eb5aeb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,905 DEBUG 
[hconnection-0x7c0bf2ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:00,907 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,907 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-25T17:08:00,909 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,910 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,912 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:00,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-25T17:08:00,913 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:00,914 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:00,918 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,918 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40222, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,921 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,931 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:00,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:00,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:08:01,005 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:01,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:01,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:01,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:01,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:01,007 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-25T17:08:01,087 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:01,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-25T17:08:01,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:01,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
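The flush of TestAcidGuarantees recorded above as pid=12 (FlushTableProcedure) and its sub-procedure pid=13 (FlushRegionProcedure) is an admin-requested flush racing with the memstore-pressure flush MemStoreFlusher.0 has already started on region 140432b4069c8ca485d8f3971c9e31fe, which is why the FlushRegionCallable reports "NOT flushing ... as already flushing" and fails with the IOException above; the master simply re-dispatches it. A minimal sketch of how a client issues this kind of flush follows; the quorum host and port are taken from the ReadOnlyZKClient entries above, everything else is an assumption rather than the test's actual code.

// Hedged sketch: trigger the same kind of admin flush that appears as
// "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" (pid=12).
// Quorum host/port come from the ReadOnlyZKClient log entries; the rest
// is illustrative only, not the TestAcidGuarantees source.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "56265");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master; while it runs, the
      // client keeps polling, which shows up in the log as the repeated
      // "Checking to see if procedure is done pid=12" entries.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}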
2024-11-25T17:08:01,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554541142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554541157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554541158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554541159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554541159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-25T17:08:01,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a216b78599644a03a713e555a0667e4f is 50, key is test_row_0/A:col10/1732554480981/Put/seqid=0 2024-11-25T17:08:01,273 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:01,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-25T17:08:01,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:01,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,296 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741839_1015 (size=12001) 2024-11-25T17:08:01,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554541324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554541324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554541324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554541324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554541325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:01,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-25T17:08:01,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:01,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
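The RegionTooBusyException warnings repeated above come from HRegion.checkResources rejecting writes while the region's memstore sits over its blocking limit of 512.0 K; the stock HBase client treats this as a retriable failure and retries internally, so the writers get through once the flush drains the memstore. Purely to illustrate what the exception looks like to application code, here is a hypothetical manual retry loop; the table, family and qualifier names mirror the log, nothing here is the test's real writer, and depending on retry settings the exception can also arrive wrapped in a retries-exhausted exception.

// Hedged sketch: what the "Over memstore limit" rejections look like to a
// writer. The explicit retry loop is illustrative only; the standard
// client already retries RegionTooBusyException on its own.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionWriter {
  static void putWithBackoff(Connection conn, byte[] row, byte[] value) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          return;                                     // accepted once the flush frees memstore space
        } catch (RegionTooBusyException e) {          // "Over memstore limit=..." from checkResources
          if (attempt >= 10) throw e;                 // give up eventually
          Thread.sleep(100L << Math.min(attempt, 5)); // simple exponential backoff
        }
      }
    }
  }
}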
2024-11-25T17:08:01,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-25T17:08:01,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554541533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554541534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554541540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554541539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554541547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,628 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:01,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-25T17:08:01,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
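The 512.0 K figure in these warnings is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a small flush size is what makes the region flush and block so aggressively under the test's write load. The configuration sketch below is hypothetical, with values chosen only so that 128 KB x 4 reproduces the 512 KB limit seen in the log, not taken from the test's setup.

// Hedged sketch: the blocking limit reported as "Over memstore limit=512.0 K"
// is flush size x block multiplier. The values below are assumptions chosen
// to match that figure (128 KB x 4 = 512 KB).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block updates at 4x = 512 KB
    return conf;
  }
}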
2024-11-25T17:08:01,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a216b78599644a03a713e555a0667e4f 2024-11-25T17:08:01,786 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:01,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-25T17:08:01,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:01,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:01,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/2e729f8171f14fe49fcd73c35eed26da is 50, key is test_row_0/B:col10/1732554480981/Put/seqid=0 2024-11-25T17:08:01,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741840_1016 (size=12001) 2024-11-25T17:08:01,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/2e729f8171f14fe49fcd73c35eed26da 2024-11-25T17:08:01,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554541847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554541849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554541851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554541852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554541848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:01,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/2d4ef9af0d6b42419eac9be978ad2e67 is 50, key is test_row_0/C:col10/1732554480981/Put/seqid=0 2024-11-25T17:08:01,944 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:01,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-25T17:08:01,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:01,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:01,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:01,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:01,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741841_1017 (size=12001)
2024-11-25T17:08:01,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/2d4ef9af0d6b42419eac9be978ad2e67
2024-11-25T17:08:01,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a216b78599644a03a713e555a0667e4f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a216b78599644a03a713e555a0667e4f
2024-11-25T17:08:01,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a216b78599644a03a713e555a0667e4f, entries=150, sequenceid=12, filesize=11.7 K
2024-11-25T17:08:02,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/2e729f8171f14fe49fcd73c35eed26da as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2e729f8171f14fe49fcd73c35eed26da
2024-11-25T17:08:02,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-25T17:08:02,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2e729f8171f14fe49fcd73c35eed26da, entries=150, sequenceid=12, filesize=11.7 K
2024-11-25T17:08:02,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/2d4ef9af0d6b42419eac9be978ad2e67 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d4ef9af0d6b42419eac9be978ad2e67
2024-11-25T17:08:02,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d4ef9af0d6b42419eac9be978ad2e67, entries=150, sequenceid=12, filesize=11.7 K
2024-11-25T17:08:02,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 140432b4069c8ca485d8f3971c9e31fe in 1067ms, sequenceid=12, compaction requested=false
2024-11-25T17:08:02,065 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-11-25T17:08:02,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:02,104 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:02,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-25T17:08:02,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:02,105 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-25T17:08:02,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A
2024-11-25T17:08:02,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:02,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B
2024-11-25T17:08:02,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:02,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C
2024-11-25T17:08:02,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:02,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/8be079adbcd749ee8a569dfcbcabee53 is 50, key is test_row_0/A:col10/1732554481140/Put/seqid=0
2024-11-25T17:08:02,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741842_1018 (size=12001)
2024-11-25T17:08:02,189 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/8be079adbcd749ee8a569dfcbcabee53
2024-11-25T17:08:02,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/c404eafedd324277a1ca43cffd53d554 is 50, key is test_row_0/B:col10/1732554481140/Put/seqid=0
2024-11-25T17:08:02,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741843_1019 (size=12001)
2024-11-25T17:08:02,277 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/c404eafedd324277a1ca43cffd53d554
2024-11-25T17:08:02,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/efd100a4069b48f2bf82d9c26926584e is 50, key is test_row_0/C:col10/1732554481140/Put/seqid=0
2024-11-25T17:08:02,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741844_1020 (size=12001)
2024-11-25T17:08:02,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe
2024-11-25T17:08:02,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing
2024-11-25T17:08:02,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554542392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554542391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554542403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554542405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554542406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,493 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T17:08:02,495 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-25T17:08:02,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554542510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554542512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554542521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554542523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554542527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554542720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554542743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,749 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/efd100a4069b48f2bf82d9c26926584e 2024-11-25T17:08:02,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554542749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554542752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:02,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554542755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:02,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/8be079adbcd749ee8a569dfcbcabee53 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8be079adbcd749ee8a569dfcbcabee53 2024-11-25T17:08:02,781 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8be079adbcd749ee8a569dfcbcabee53, entries=150, sequenceid=37, filesize=11.7 K 2024-11-25T17:08:02,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/c404eafedd324277a1ca43cffd53d554 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c404eafedd324277a1ca43cffd53d554 2024-11-25T17:08:02,798 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c404eafedd324277a1ca43cffd53d554, entries=150, sequenceid=37, filesize=11.7 K 2024-11-25T17:08:02,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/efd100a4069b48f2bf82d9c26926584e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/efd100a4069b48f2bf82d9c26926584e 2024-11-25T17:08:02,814 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/efd100a4069b48f2bf82d9c26926584e, entries=150, sequenceid=37, filesize=11.7 K 2024-11-25T17:08:02,817 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 140432b4069c8ca485d8f3971c9e31fe in 711ms, sequenceid=37, compaction requested=false 2024-11-25T17:08:02,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:02,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:02,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-25T17:08:02,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-25T17:08:02,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-25T17:08:02,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9040 sec 2024-11-25T17:08:02,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.9200 sec 2024-11-25T17:08:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-25T17:08:03,031 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-25T17:08:03,034 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:03,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-25T17:08:03,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:08:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:03,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:03,037 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:03,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:03,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:03,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-25T17:08:03,039 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:03,040 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:03,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:03,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/7282eb3bf6ec4351a244666fed03663c is 50, key is test_row_0/A:col10/1732554483033/Put/seqid=0 2024-11-25T17:08:03,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741845_1021 (size=14341) 2024-11-25T17:08:03,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/7282eb3bf6ec4351a244666fed03663c 2024-11-25T17:08:03,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/21ab45edca22440fa4a231e7ff45fe6b is 50, key is test_row_0/B:col10/1732554483033/Put/seqid=0 2024-11-25T17:08:03,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554543115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554543115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554543116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554543120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554543122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741846_1022 (size=12001) 2024-11-25T17:08:03,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/21ab45edca22440fa4a231e7ff45fe6b 2024-11-25T17:08:03,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-25T17:08:03,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/543ef6374d95495a880a7e522913bb9e is 50, key is test_row_0/C:col10/1732554483033/Put/seqid=0 2024-11-25T17:08:03,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741847_1023 (size=12001) 2024-11-25T17:08:03,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/543ef6374d95495a880a7e522913bb9e 2024-11-25T17:08:03,206 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:03,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-25T17:08:03,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:03,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:03,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:03,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:03,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:03,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:03,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/7282eb3bf6ec4351a244666fed03663c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7282eb3bf6ec4351a244666fed03663c 2024-11-25T17:08:03,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554543229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554543231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7282eb3bf6ec4351a244666fed03663c, entries=200, sequenceid=49, filesize=14.0 K 2024-11-25T17:08:03,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554543231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/21ab45edca22440fa4a231e7ff45fe6b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21ab45edca22440fa4a231e7ff45fe6b 2024-11-25T17:08:03,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554543232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,247 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T17:08:03,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554543233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21ab45edca22440fa4a231e7ff45fe6b, entries=150, sequenceid=49, filesize=11.7 K 2024-11-25T17:08:03,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/543ef6374d95495a880a7e522913bb9e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/543ef6374d95495a880a7e522913bb9e 2024-11-25T17:08:03,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/543ef6374d95495a880a7e522913bb9e, entries=150, sequenceid=49, filesize=11.7 K 2024-11-25T17:08:03,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 140432b4069c8ca485d8f3971c9e31fe in 235ms, sequenceid=49, compaction requested=true 2024-11-25T17:08:03,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:03,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:03,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:03,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:03,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:03,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under 
compaction store size is 3 2024-11-25T17:08:03,290 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:03,290 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:03,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:03,295 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:03,295 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:03,297 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:03,297 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:03,297 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:03,297 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:03,298 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a216b78599644a03a713e555a0667e4f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8be079adbcd749ee8a569dfcbcabee53, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7282eb3bf6ec4351a244666fed03663c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=37.4 K 2024-11-25T17:08:03,298 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2e729f8171f14fe49fcd73c35eed26da, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c404eafedd324277a1ca43cffd53d554, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21ab45edca22440fa4a231e7ff45fe6b] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.2 K 2024-11-25T17:08:03,299 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e729f8171f14fe49fcd73c35eed26da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732554480970 2024-11-25T17:08:03,300 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c404eafedd324277a1ca43cffd53d554, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732554481140 2024-11-25T17:08:03,301 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 21ab45edca22440fa4a231e7ff45fe6b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732554482375 2024-11-25T17:08:03,301 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a216b78599644a03a713e555a0667e4f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732554480970 2024-11-25T17:08:03,303 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8be079adbcd749ee8a569dfcbcabee53, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732554481140 2024-11-25T17:08:03,304 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7282eb3bf6ec4351a244666fed03663c, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732554482375 2024-11-25T17:08:03,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-25T17:08:03,348 INFO [RS:0;6579369734b6:41865-longCompactions-0 
{}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#9 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:03,349 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/0cc9aa268a60446aa1d7412bc5ddfb6b is 50, key is test_row_0/B:col10/1732554483033/Put/seqid=0 2024-11-25T17:08:03,353 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#10 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:03,354 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/e4cb729e85e94f0583e5757ae0f1f8a9 is 50, key is test_row_0/A:col10/1732554483033/Put/seqid=0 2024-11-25T17:08:03,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:03,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-25T17:08:03,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:03,369 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-25T17:08:03,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:03,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:03,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:03,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:03,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:03,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:03,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741848_1024 (size=12104) 2024-11-25T17:08:03,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741849_1025 (size=12104) 2024-11-25T17:08:03,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d181c0fc5da14b109679fe217291dca5 is 50, key is test_row_0/A:col10/1732554483119/Put/seqid=0 2024-11-25T17:08:03,451 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/e4cb729e85e94f0583e5757ae0f1f8a9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/e4cb729e85e94f0583e5757ae0f1f8a9 2024-11-25T17:08:03,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:03,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
as already flushing 2024-11-25T17:08:03,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741850_1026 (size=12001) 2024-11-25T17:08:03,484 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d181c0fc5da14b109679fe217291dca5 2024-11-25T17:08:03,501 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into e4cb729e85e94f0583e5757ae0f1f8a9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:03,501 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:03,501 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554483273; duration=0sec 2024-11-25T17:08:03,502 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:03,502 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:03,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554543492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,504 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:03,513 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:03,513 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:03,514 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:03,514 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d4ef9af0d6b42419eac9be978ad2e67, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/efd100a4069b48f2bf82d9c26926584e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/543ef6374d95495a880a7e522913bb9e] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.2 K 2024-11-25T17:08:03,515 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d4ef9af0d6b42419eac9be978ad2e67, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732554480970 2024-11-25T17:08:03,516 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting efd100a4069b48f2bf82d9c26926584e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732554481140 2024-11-25T17:08:03,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8a1e4e0c56634f519e19bbd6455c086d is 50, key is test_row_0/B:col10/1732554483119/Put/seqid=0 2024-11-25T17:08:03,518 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 543ef6374d95495a880a7e522913bb9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732554482375 2024-11-25T17:08:03,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554543499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554543500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554543503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554543501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741851_1027 (size=12001) 2024-11-25T17:08:03,562 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8a1e4e0c56634f519e19bbd6455c086d 2024-11-25T17:08:03,564 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#13 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:03,565 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/e770b63cac054ffeb33db74be560b6d8 is 50, key is test_row_0/C:col10/1732554483033/Put/seqid=0 2024-11-25T17:08:03,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cd5c57ab104e4d9b895febde7bc0d0c9 is 50, key is test_row_0/C:col10/1732554483119/Put/seqid=0 2024-11-25T17:08:03,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741852_1028 (size=12104) 2024-11-25T17:08:03,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554543606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,623 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/e770b63cac054ffeb33db74be560b6d8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e770b63cac054ffeb33db74be560b6d8 2024-11-25T17:08:03,636 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into e770b63cac054ffeb33db74be560b6d8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:03,637 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:03,637 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=13, startTime=1732554483290; duration=0sec 2024-11-25T17:08:03,637 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:03,637 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:03,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554543636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554543630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554543637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-25T17:08:03,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554543639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741853_1029 (size=12001) 2024-11-25T17:08:03,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554543813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,840 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/0cc9aa268a60446aa1d7412bc5ddfb6b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/0cc9aa268a60446aa1d7412bc5ddfb6b 2024-11-25T17:08:03,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554543844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554543846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554543846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:03,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554543846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:03,870 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into 0cc9aa268a60446aa1d7412bc5ddfb6b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
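Most of the rejected calls above (callId 30 through 48, methodName Mutate) are individual puts bouncing off the same overloaded region while its memstore is flushed and its stores are compacted. The stock HBase client already retries RegionTooBusyException with backoff on its own, so the following is only a minimal, hypothetical sketch of an explicit client-side backoff loop; the table, row, and column names are taken from the log, while the retry count and backoff parameters are assumptions.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                     // assumed initial backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                   // rejected server-side while the memstore is over its limit
                    break;
                } catch (IOException ioe) {
                    // Depending on client retry settings, the server's RegionTooBusyException
                    // may surface directly or as the cause of a retries-exhausted exception.
                    boolean tooBusy = ioe instanceof RegionTooBusyException
                            || ioe.getCause() instanceof RegionTooBusyException;
                    if (!tooBusy || attempt == 5) {
                        throw ioe;
                    }
                    Thread.sleep(backoffMs);          // wait for the flush logged above to free memstore space
                    backoffMs *= 2;
                }
            }
        }
    }
}
```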
2024-11-25T17:08:03,870 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:03,870 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554483290; duration=0sec 2024-11-25T17:08:03,870 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:03,870 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:04,083 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cd5c57ab104e4d9b895febde7bc0d0c9 2024-11-25T17:08:04,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d181c0fc5da14b109679fe217291dca5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d181c0fc5da14b109679fe217291dca5 2024-11-25T17:08:04,112 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d181c0fc5da14b109679fe217291dca5, entries=150, sequenceid=73, filesize=11.7 K 2024-11-25T17:08:04,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8a1e4e0c56634f519e19bbd6455c086d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8a1e4e0c56634f519e19bbd6455c086d 2024-11-25T17:08:04,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554544121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,130 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8a1e4e0c56634f519e19bbd6455c086d, entries=150, sequenceid=73, filesize=11.7 K 2024-11-25T17:08:04,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cd5c57ab104e4d9b895febde7bc0d0c9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cd5c57ab104e4d9b895febde7bc0d0c9 2024-11-25T17:08:04,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-25T17:08:04,154 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cd5c57ab104e4d9b895febde7bc0d0c9, entries=150, sequenceid=73, filesize=11.7 K 2024-11-25T17:08:04,157 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 140432b4069c8ca485d8f3971c9e31fe in 787ms, sequenceid=73, compaction requested=false 2024-11-25T17:08:04,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:04,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
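The flush that just completed above (dataSize ~140.89 KB in 787ms, sequenceid=73) was driven by the flush procedure visible in the RS_FLUSH_OPERATIONS / pid=15 entries. The same table-wide flush, and a compaction like the ones the shortCompactions and longCompactions threads ran earlier, can also be requested from a client through the Admin API. A minimal sketch follows; only the table name is taken from the log, everything else is illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Ask the master to flush every region of the table; on the region
            // server this shows up as the RS_FLUSH_REGIONS events logged above.
            admin.flush(table);
            // Request a compaction of the flushed store files, the same kind of
            // work the compaction threads perform in this log.
            admin.compact(table);
        }
    }
}
```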
2024-11-25T17:08:04,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-25T17:08:04,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:04,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-25T17:08:04,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-25T17:08:04,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:04,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:04,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:04,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:04,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:04,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:04,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-25T17:08:04,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1190 sec 2024-11-25T17:08:04,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.1290 sec 2024-11-25T17:08:04,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/1d94363949f24a0d84ed2e7a367e8bde is 50, key is test_row_0/A:col10/1732554483500/Put/seqid=0 2024-11-25T17:08:04,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741854_1030 (size=14341) 2024-11-25T17:08:04,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/1d94363949f24a0d84ed2e7a367e8bde 2024-11-25T17:08:04,193 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T17:08:04,193 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T17:08:04,196 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter 
for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-25T17:08:04,196 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-25T17:08:04,197 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T17:08:04,197 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T17:08:04,198 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T17:08:04,198 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-25T17:08:04,199 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-25T17:08:04,199 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-25T17:08:04,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/f5468bc8ca2c450d87ecc3ce4d5f78ad is 50, key is test_row_0/B:col10/1732554483500/Put/seqid=0 2024-11-25T17:08:04,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554544221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554544225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554544225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554544225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741855_1031 (size=12001) 2024-11-25T17:08:04,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/f5468bc8ca2c450d87ecc3ce4d5f78ad 2024-11-25T17:08:04,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/4e146e178e214ce78cd0345387a10fc4 is 50, key is test_row_0/C:col10/1732554483500/Put/seqid=0 2024-11-25T17:08:04,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554544328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554544332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554544334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554544334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741856_1032 (size=12001) 2024-11-25T17:08:04,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/4e146e178e214ce78cd0345387a10fc4 2024-11-25T17:08:04,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/1d94363949f24a0d84ed2e7a367e8bde as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1d94363949f24a0d84ed2e7a367e8bde 2024-11-25T17:08:04,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1d94363949f24a0d84ed2e7a367e8bde, entries=200, sequenceid=89, filesize=14.0 K 2024-11-25T17:08:04,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/f5468bc8ca2c450d87ecc3ce4d5f78ad as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f5468bc8ca2c450d87ecc3ce4d5f78ad 2024-11-25T17:08:04,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f5468bc8ca2c450d87ecc3ce4d5f78ad, entries=150, sequenceid=89, filesize=11.7 K 2024-11-25T17:08:04,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/4e146e178e214ce78cd0345387a10fc4 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4e146e178e214ce78cd0345387a10fc4 2024-11-25T17:08:04,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4e146e178e214ce78cd0345387a10fc4, entries=150, sequenceid=89, filesize=11.7 K 2024-11-25T17:08:04,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 140432b4069c8ca485d8f3971c9e31fe in 275ms, sequenceid=89, compaction requested=true 2024-11-25T17:08:04,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:04,434 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:04,437 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:04,437 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:04,437 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:04,438 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/e4cb729e85e94f0583e5757ae0f1f8a9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d181c0fc5da14b109679fe217291dca5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1d94363949f24a0d84ed2e7a367e8bde] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=37.5 K 2024-11-25T17:08:04,439 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e4cb729e85e94f0583e5757ae0f1f8a9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732554482375 2024-11-25T17:08:04,439 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d181c0fc5da14b109679fe217291dca5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732554483107 2024-11-25T17:08:04,440 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d94363949f24a0d84ed2e7a367e8bde, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732554483494 2024-11-25T17:08:04,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:04,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:04,455 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:04,456 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:04,457 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:04,457 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:04,457 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/0cc9aa268a60446aa1d7412bc5ddfb6b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8a1e4e0c56634f519e19bbd6455c086d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f5468bc8ca2c450d87ecc3ce4d5f78ad] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.3 K 2024-11-25T17:08:04,458 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cc9aa268a60446aa1d7412bc5ddfb6b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732554482375 2024-11-25T17:08:04,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a1e4e0c56634f519e19bbd6455c086d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732554483107 2024-11-25T17:08:04,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5468bc8ca2c450d87ecc3ce4d5f78ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732554483497 2024-11-25T17:08:04,474 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#18 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:04,475 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/6709bdb2f1964b5d8cb472f27264bdeb is 50, key is test_row_0/A:col10/1732554483500/Put/seqid=0 2024-11-25T17:08:04,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:04,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:04,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:04,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:04,480 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:04,481 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/582bcf1db0304ef98e2a0d4ee8ee6b6e is 50, key is test_row_0/B:col10/1732554483500/Put/seqid=0 2024-11-25T17:08:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741857_1033 (size=12207) 2024-11-25T17:08:04,519 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/6709bdb2f1964b5d8cb472f27264bdeb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/6709bdb2f1964b5d8cb472f27264bdeb 2024-11-25T17:08:04,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741858_1034 (size=12207) 2024-11-25T17:08:04,540 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into 6709bdb2f1964b5d8cb472f27264bdeb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:04,540 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:04,541 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554484434; duration=0sec 2024-11-25T17:08:04,541 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:04,541 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:04,541 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:04,548 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:04,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:04,548 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/582bcf1db0304ef98e2a0d4ee8ee6b6e as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/582bcf1db0304ef98e2a0d4ee8ee6b6e 2024-11-25T17:08:04,548 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:04,549 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:04,549 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e770b63cac054ffeb33db74be560b6d8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cd5c57ab104e4d9b895febde7bc0d0c9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4e146e178e214ce78cd0345387a10fc4] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.3 K 2024-11-25T17:08:04,551 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e770b63cac054ffeb33db74be560b6d8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732554482375 2024-11-25T17:08:04,553 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cd5c57ab104e4d9b895febde7bc0d0c9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732554483107 2024-11-25T17:08:04,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-25T17:08:04,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:04,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:04,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:04,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:04,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:04,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:04,570 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e146e178e214ce78cd0345387a10fc4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732554483497 2024-11-25T17:08:04,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/5d15fe563ccb4aa6a70288fd09f07933 is 50, key is test_row_0/A:col10/1732554484554/Put/seqid=0 2024-11-25T17:08:04,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554544575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,595 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into 582bcf1db0304ef98e2a0d4ee8ee6b6e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554544589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,595 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:04,595 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554484454; duration=0sec 2024-11-25T17:08:04,595 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:04,595 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:04,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554544590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554544591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,600 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#21 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:04,602 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9f86d23738f54701a3109ad6bebc2255 is 50, key is test_row_0/C:col10/1732554483500/Put/seqid=0 2024-11-25T17:08:04,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554544628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741860_1036 (size=12207) 2024-11-25T17:08:04,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741859_1035 (size=12001) 2024-11-25T17:08:04,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554544693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554544697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554544701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554544704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554544898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554544903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554544908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:04,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:04,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554544913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/5d15fe563ccb4aa6a70288fd09f07933 2024-11-25T17:08:05,085 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9f86d23738f54701a3109ad6bebc2255 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9f86d23738f54701a3109ad6bebc2255 2024-11-25T17:08:05,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ee95d568b8c946d7a95db4fd97bbbd8a is 50, key is test_row_0/B:col10/1732554484554/Put/seqid=0 2024-11-25T17:08:05,105 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into 9f86d23738f54701a3109ad6bebc2255(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:05,106 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:05,106 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=13, startTime=1732554484477; duration=0sec 2024-11-25T17:08:05,106 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:05,106 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:05,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741861_1037 (size=12001) 2024-11-25T17:08:05,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-25T17:08:05,146 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-25T17:08:05,149 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:05,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-25T17:08:05,155 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:05,157 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:05,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-25T17:08:05,158 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:05,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554545204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554545210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554545214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554545219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-25T17:08:05,313 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:05,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-25T17:08:05,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:05,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
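The repeated RegionTooBusyException rejections above all report the same blocking threshold, "Over memstore limit=512.0 K". As a hedged illustration only: in HBase this blocking limit is normally the per-region flush size (hbase.hregion.memstore.flush.size) multiplied by the block multiplier (hbase.hregion.memstore.block.multiplier), so a small test-sized flush setting would yield exactly this figure. The concrete values in the sketch below are assumptions; the log records only the resulting 512.0 K.

```java
// Hedged sketch: how the 512.0 K blocking limit seen above is typically derived.
// The individual settings are assumed values for illustration, not read from the log.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    long flushSizeBytes = 128 * 1024;  // assumed test override of hbase.hregion.memstore.flush.size
    int blockMultiplier = 4;           // assumed hbase.hregion.memstore.block.multiplier
    long blockingLimit = flushSizeBytes * blockMultiplier;
    // Once the region's memstore exceeds this limit, checkResources rejects writes with
    // RegionTooBusyException until the in-flight flush brings usage back down.
    System.out.printf("blocking memstore limit = %.1f K%n", blockingLimit / 1024.0);
  }
}
```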
2024-11-25T17:08:05,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-25T17:08:05,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:05,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-25T17:08:05,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:05,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
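The master keeps re-dispatching the flush procedure (pid=17), and the region server keeps rejecting it with "Unable to complete flush ... as already flushing" because a flush is still in progress on that region. A minimal sketch, assuming the flush was requested through the public Admin API (the log does not show the caller), of how such a table flush is issued:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure against every region of the table.
      // While a region is already flushing (as pid=17 keeps hitting above), the region
      // server rejects the callable and the master re-dispatches it later.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The rejection is expected to be transient: once the in-flight flush completes, a retried dispatch of the same procedure can proceed.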
2024-11-25T17:08:05,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ee95d568b8c946d7a95db4fd97bbbd8a 2024-11-25T17:08:05,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/f88361ef67624afca17e03dde5833939 is 50, key is test_row_0/C:col10/1732554484554/Put/seqid=0 2024-11-25T17:08:05,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741862_1038 (size=12001) 2024-11-25T17:08:05,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/f88361ef67624afca17e03dde5833939 2024-11-25T17:08:05,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/5d15fe563ccb4aa6a70288fd09f07933 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5d15fe563ccb4aa6a70288fd09f07933 2024-11-25T17:08:05,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5d15fe563ccb4aa6a70288fd09f07933, entries=150, sequenceid=118, filesize=11.7 K 2024-11-25T17:08:05,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ee95d568b8c946d7a95db4fd97bbbd8a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ee95d568b8c946d7a95db4fd97bbbd8a 2024-11-25T17:08:05,625 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:05,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-25T17:08:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
as already flushing 2024-11-25T17:08:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ee95d568b8c946d7a95db4fd97bbbd8a, entries=150, sequenceid=118, filesize=11.7 K 2024-11-25T17:08:05,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/f88361ef67624afca17e03dde5833939 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/f88361ef67624afca17e03dde5833939 2024-11-25T17:08:05,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554545637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/f88361ef67624afca17e03dde5833939, entries=150, sequenceid=118, filesize=11.7 K 2024-11-25T17:08:05,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 140432b4069c8ca485d8f3971c9e31fe in 1090ms, sequenceid=118, compaction requested=false 2024-11-25T17:08:05,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:05,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:08:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:05,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:05,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:05,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:05,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:05,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:05,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:05,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-25T17:08:05,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a25964cadf1245edac6f6b69475a8519 is 50, key is test_row_0/A:col10/1732554485745/Put/seqid=0 2024-11-25T17:08:05,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): 
New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:05,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-25T17:08:05,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:05,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
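While the flush is draining the memstore, the handlers above keep answering Mutate calls with RegionTooBusyException. The HBase client normally absorbs these by retrying internally; the sketch below only makes the backoff explicit for illustration, reusing the table, row, and column names that appear in the log (the written value and retry parameters are placeholders).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);  // rejected while the region is over its blocking memstore limit
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);  // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}
```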
2024-11-25T17:08:05,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741863_1039 (size=12101) 2024-11-25T17:08:05,838 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a25964cadf1245edac6f6b69475a8519 2024-11-25T17:08:05,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554545836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554545839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554545841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554545841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/eaa3be1f0aa5443d8e2e91f5bf655476 is 50, key is test_row_0/B:col10/1732554485745/Put/seqid=0 2024-11-25T17:08:05,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741864_1040 (size=12101) 2024-11-25T17:08:05,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/eaa3be1f0aa5443d8e2e91f5bf655476 2024-11-25T17:08:05,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/11582c3278634c79a335db9f7ef91785 is 50, key is test_row_0/C:col10/1732554485745/Put/seqid=0 2024-11-25T17:08:05,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741865_1041 (size=12101) 2024-11-25T17:08:05,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/11582c3278634c79a335db9f7ef91785 2024-11-25T17:08:05,937 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:05,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-25T17:08:05,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:05,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:05,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
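The flush above writes each store's output under the region's .tmp directory and then commits it into the A, B, and C family directories, as the "Committing ... as ..." lines show. A speculative convenience sketch, not part of the test itself: listing the committed HFiles for one family directly over HDFS, using the NameNode address and directory layout copied from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:41117");  // NameNode address taken from the log
    // Column-family directory for store B of the region under test (path copied from the log).
    Path familyDir = new Path("/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/"
        + "data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B");
    try (FileSystem fs = FileSystem.get(conf)) {
      for (FileStatus f : fs.listStatus(familyDir)) {
        // Each entry is an HFile committed by a flush or compaction,
        // e.g. ee95d568b8c946d7a95db4fd97bbbd8a from the flush above.
        System.out.println(f.getPath().getName() + "\t" + f.getLen() + " bytes");
      }
    }
  }
}
```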
2024-11-25T17:08:05,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:05,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a25964cadf1245edac6f6b69475a8519 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a25964cadf1245edac6f6b69475a8519 2024-11-25T17:08:05,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554545944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554545950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a25964cadf1245edac6f6b69475a8519, entries=150, sequenceid=132, filesize=11.8 K 2024-11-25T17:08:05,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/eaa3be1f0aa5443d8e2e91f5bf655476 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/eaa3be1f0aa5443d8e2e91f5bf655476 2024-11-25T17:08:05,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554545951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:05,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554545951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:05,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/eaa3be1f0aa5443d8e2e91f5bf655476, entries=150, sequenceid=132, filesize=11.8 K 2024-11-25T17:08:05,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/11582c3278634c79a335db9f7ef91785 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/11582c3278634c79a335db9f7ef91785 2024-11-25T17:08:05,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/11582c3278634c79a335db9f7ef91785, entries=150, sequenceid=132, filesize=11.8 K 2024-11-25T17:08:05,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 140432b4069c8ca485d8f3971c9e31fe in 236ms, sequenceid=132, compaction requested=true 2024-11-25T17:08:05,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:05,989 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:05,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:05,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:05,989 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:05,991 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-25T17:08:05,991 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:05,991 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:05,991 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:05,991 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,991 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:05,991 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/6709bdb2f1964b5d8cb472f27264bdeb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5d15fe563ccb4aa6a70288fd09f07933, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a25964cadf1245edac6f6b69475a8519] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.5 K 2024-11-25T17:08:05,991 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/582bcf1db0304ef98e2a0d4ee8ee6b6e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ee95d568b8c946d7a95db4fd97bbbd8a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/eaa3be1f0aa5443d8e2e91f5bf655476] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.5 K 2024-11-25T17:08:05,992 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 582bcf1db0304ef98e2a0d4ee8ee6b6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732554483497 2024-11-25T17:08:05,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6709bdb2f1964b5d8cb472f27264bdeb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732554483497 2024-11-25T17:08:05,993 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee95d568b8c946d7a95db4fd97bbbd8a, 
keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732554484547 2024-11-25T17:08:05,993 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d15fe563ccb4aa6a70288fd09f07933, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732554484547 2024-11-25T17:08:05,994 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a25964cadf1245edac6f6b69475a8519, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554484581 2024-11-25T17:08:05,994 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting eaa3be1f0aa5443d8e2e91f5bf655476, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554484581 2024-11-25T17:08:05,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:05,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:05,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:05,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:06,016 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:06,017 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/576edb3098184db88c3fad5ebd0cd1f9 is 50, key is test_row_0/A:col10/1732554485745/Put/seqid=0 2024-11-25T17:08:06,033 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:06,034 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/db73f8df4db140f0954bc90dd97207f4 is 50, key is test_row_0/B:col10/1732554485745/Put/seqid=0 2024-11-25T17:08:06,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741866_1042 (size=12409) 2024-11-25T17:08:06,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741867_1043 (size=12409) 2024-11-25T17:08:06,094 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:06,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-25T17:08:06,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:06,097 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:08:06,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:06,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:06,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:06,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:06,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:06,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:06,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/1a5b06d3c41944b1b752c98c501218ac is 50, key is test_row_0/A:col10/1732554485838/Put/seqid=0 2024-11-25T17:08:06,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741868_1044 
(size=12151) 2024-11-25T17:08:06,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:06,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:06,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554546194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554546203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554546208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554546209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-25T17:08:06,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554546312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554546314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554546317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554546319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,456 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/576edb3098184db88c3fad5ebd0cd1f9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/576edb3098184db88c3fad5ebd0cd1f9 2024-11-25T17:08:06,477 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into 576edb3098184db88c3fad5ebd0cd1f9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:06,477 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:06,477 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554485988; duration=0sec 2024-11-25T17:08:06,478 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:06,478 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:06,478 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:06,480 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:06,480 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:06,481 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:06,481 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9f86d23738f54701a3109ad6bebc2255, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/f88361ef67624afca17e03dde5833939, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/11582c3278634c79a335db9f7ef91785] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.5 K 2024-11-25T17:08:06,481 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/db73f8df4db140f0954bc90dd97207f4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/db73f8df4db140f0954bc90dd97207f4 2024-11-25T17:08:06,482 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f86d23738f54701a3109ad6bebc2255, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732554483497 2024-11-25T17:08:06,483 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f88361ef67624afca17e03dde5833939, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732554484547 2024-11-25T17:08:06,483 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 11582c3278634c79a335db9f7ef91785, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554484581 2024-11-25T17:08:06,496 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into db73f8df4db140f0954bc90dd97207f4(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:06,497 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:06,497 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554485989; duration=0sec 2024-11-25T17:08:06,500 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:06,500 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:06,506 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:06,507 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cbb7d54c46a14904bc7474c322b1c88e is 50, key is test_row_0/C:col10/1732554485745/Put/seqid=0 2024-11-25T17:08:06,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554546519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554546519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554546524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554546523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741869_1045 (size=12409) 2024-11-25T17:08:06,569 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/1a5b06d3c41944b1b752c98c501218ac 2024-11-25T17:08:06,570 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cbb7d54c46a14904bc7474c322b1c88e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cbb7d54c46a14904bc7474c322b1c88e 2024-11-25T17:08:06,582 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into cbb7d54c46a14904bc7474c322b1c88e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:06,582 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:06,582 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=13, startTime=1732554485995; duration=0sec 2024-11-25T17:08:06,582 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:06,582 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:06,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/3e1e0983e6104b2496c3c14cd3be9c89 is 50, key is test_row_0/B:col10/1732554485838/Put/seqid=0 2024-11-25T17:08:06,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741870_1046 (size=12151) 2024-11-25T17:08:06,619 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/3e1e0983e6104b2496c3c14cd3be9c89 2024-11-25T17:08:06,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/31f8115dbe0445e181b0f8c44934fa94 is 50, key is test_row_0/C:col10/1732554485838/Put/seqid=0 2024-11-25T17:08:06,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741871_1047 (size=12151) 2024-11-25T17:08:06,659 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/31f8115dbe0445e181b0f8c44934fa94 2024-11-25T17:08:06,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/1a5b06d3c41944b1b752c98c501218ac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1a5b06d3c41944b1b752c98c501218ac 2024-11-25T17:08:06,686 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1a5b06d3c41944b1b752c98c501218ac, entries=150, sequenceid=157, filesize=11.9 K 2024-11-25T17:08:06,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/3e1e0983e6104b2496c3c14cd3be9c89 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/3e1e0983e6104b2496c3c14cd3be9c89 2024-11-25T17:08:06,735 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/3e1e0983e6104b2496c3c14cd3be9c89, entries=150, sequenceid=157, filesize=11.9 K 2024-11-25T17:08:06,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/31f8115dbe0445e181b0f8c44934fa94 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/31f8115dbe0445e181b0f8c44934fa94 2024-11-25T17:08:06,755 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/31f8115dbe0445e181b0f8c44934fa94, entries=150, sequenceid=157, filesize=11.9 K 2024-11-25T17:08:06,757 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 140432b4069c8ca485d8f3971c9e31fe in 661ms, sequenceid=157, compaction requested=false 2024-11-25T17:08:06,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:06,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:06,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-25T17:08:06,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-25T17:08:06,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-25T17:08:06,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6030 sec 2024-11-25T17:08:06,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.6230 sec 2024-11-25T17:08:06,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:06,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:08:06,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:06,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:06,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:06,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:06,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:06,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:06,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/436bb8f8da774b64a2ff43574b565192 is 50, key is test_row_0/A:col10/1732554486834/Put/seqid=0 2024-11-25T17:08:06,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741872_1048 (size=12151) 2024-11-25T17:08:06,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/436bb8f8da774b64a2ff43574b565192 2024-11-25T17:08:06,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/68bc0f5743054ad88583985b65b48cf7 is 50, key is test_row_0/B:col10/1732554486834/Put/seqid=0 2024-11-25T17:08:06,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554546889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554546892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554546893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:06,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554546895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:06,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741873_1049 (size=12151) 2024-11-25T17:08:06,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/68bc0f5743054ad88583985b65b48cf7 2024-11-25T17:08:06,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/07393e512fca4910b70e3ba3e8b8334b is 50, key is test_row_0/C:col10/1732554486834/Put/seqid=0 2024-11-25T17:08:06,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741874_1050 (size=12151) 2024-11-25T17:08:06,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/07393e512fca4910b70e3ba3e8b8334b 2024-11-25T17:08:06,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/436bb8f8da774b64a2ff43574b565192 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/436bb8f8da774b64a2ff43574b565192 2024-11-25T17:08:06,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/436bb8f8da774b64a2ff43574b565192, 
entries=150, sequenceid=172, filesize=11.9 K 2024-11-25T17:08:06,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/68bc0f5743054ad88583985b65b48cf7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/68bc0f5743054ad88583985b65b48cf7 2024-11-25T17:08:06,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/68bc0f5743054ad88583985b65b48cf7, entries=150, sequenceid=172, filesize=11.9 K 2024-11-25T17:08:06,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/07393e512fca4910b70e3ba3e8b8334b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/07393e512fca4910b70e3ba3e8b8334b 2024-11-25T17:08:07,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554546999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554546999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554547000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554547000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/07393e512fca4910b70e3ba3e8b8334b, entries=150, sequenceid=172, filesize=11.9 K 2024-11-25T17:08:07,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 140432b4069c8ca485d8f3971c9e31fe in 188ms, sequenceid=172, compaction requested=true 2024-11-25T17:08:07,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:07,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:07,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:07,025 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:07,025 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:07,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:07,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:07,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:07,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:07,027 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:07,027 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:07,027 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,027 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/db73f8df4db140f0954bc90dd97207f4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/3e1e0983e6104b2496c3c14cd3be9c89, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/68bc0f5743054ad88583985b65b48cf7] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.9 K 2024-11-25T17:08:07,028 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:07,028 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:07,028 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
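
The repeated RegionTooBusyException WARN/DEBUG entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K here); the rejections clear once MemStoreFlusher finishes the flush. The stock HBase client already retries this exception internally, so the following is only an illustrative, simplified sketch of absorbing such rejections with a manual backoff loop; the table and column names follow the log, while the retry policy and value payload are assumptions.

// Hypothetical client-side sketch: retry a Put when the server reports
// RegionTooBusyException (memstore over its blocking limit), as in the
// WARN entries above. Not how the test itself issues writes.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);            // server may reject with RegionTooBusyException
          break;                     // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e; // give up after a few tries
          Thread.sleep(backoffMs);   // wait for the flush to drain the memstore
          backoffMs *= 2;            // simple exponential backoff
        }
      }
    }
  }
}
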
2024-11-25T17:08:07,029 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/576edb3098184db88c3fad5ebd0cd1f9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1a5b06d3c41944b1b752c98c501218ac, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/436bb8f8da774b64a2ff43574b565192] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.9 K 2024-11-25T17:08:07,029 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting db73f8df4db140f0954bc90dd97207f4, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554484581 2024-11-25T17:08:07,029 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 576edb3098184db88c3fad5ebd0cd1f9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554484581 2024-11-25T17:08:07,030 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e1e0983e6104b2496c3c14cd3be9c89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732554485811 2024-11-25T17:08:07,031 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 68bc0f5743054ad88583985b65b48cf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554486191 2024-11-25T17:08:07,031 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a5b06d3c41944b1b752c98c501218ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732554485811 2024-11-25T17:08:07,032 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 436bb8f8da774b64a2ff43574b565192, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554486191 2024-11-25T17:08:07,052 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#36 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:07,057 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/815a08e092b34601bcf917602482f2eb is 50, key is test_row_0/B:col10/1732554486834/Put/seqid=0 2024-11-25T17:08:07,071 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#37 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:07,072 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/205708f2209f458cb0dc65cbc93934f4 is 50, key is test_row_0/A:col10/1732554486834/Put/seqid=0 2024-11-25T17:08:07,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741875_1051 (size=12561) 2024-11-25T17:08:07,114 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/815a08e092b34601bcf917602482f2eb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/815a08e092b34601bcf917602482f2eb 2024-11-25T17:08:07,125 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into 815a08e092b34601bcf917602482f2eb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:07,126 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:07,126 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554487025; duration=0sec 2024-11-25T17:08:07,126 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:07,126 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:07,126 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:07,130 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:07,130 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:07,131 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
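
The 512.0 K figure in the "Over memstore limit" messages is the per-region blocking limit, i.e. the memstore flush size multiplied by the block multiplier. The exact values TestAcidGuarantees configures are not visible in this log, so the numbers below are only an assumed combination that reproduces a 512 K limit:

// Assumed configuration sketch: a small flush size plus the default
// multiplier yields the 512 K blocking limit seen in the WARN entries.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches 128 KB (assumed value) ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // ... and block new writes at 4x that, i.e. 512 KB, matching
    // "Over memstore limit=512.0 K" in this log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
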
2024-11-25T17:08:07,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741876_1052 (size=12561) 2024-11-25T17:08:07,131 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cbb7d54c46a14904bc7474c322b1c88e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/31f8115dbe0445e181b0f8c44934fa94, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/07393e512fca4910b70e3ba3e8b8334b] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=35.9 K 2024-11-25T17:08:07,131 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cbb7d54c46a14904bc7474c322b1c88e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554484581 2024-11-25T17:08:07,132 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 31f8115dbe0445e181b0f8c44934fa94, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732554485811 2024-11-25T17:08:07,133 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 07393e512fca4910b70e3ba3e8b8334b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554486191 2024-11-25T17:08:07,146 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/205708f2209f458cb0dc65cbc93934f4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/205708f2209f458cb0dc65cbc93934f4 2024-11-25T17:08:07,156 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into 205708f2209f458cb0dc65cbc93934f4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
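
The FlushTableProcedure/FlushRegionProcedure entries in this log (pid=16/17 above) are driven by explicit table flushes issued from the test client via the admin API. A minimal sketch of issuing such a flush, with connection details assumed:

// Sketch: ask the master to flush all regions of the table. On the master
// this surfaces as a FlushTableProcedure with FlushRegionProcedure children,
// as in the pid=16/17 entries earlier in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the flush request is accepted and the procedure completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
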
2024-11-25T17:08:07,156 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:07,156 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554487025; duration=0sec 2024-11-25T17:08:07,156 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:07,156 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:07,170 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#38 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:07,171 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/e76b672dd37443a4ab5b2e26af0b0578 is 50, key is test_row_0/C:col10/1732554486834/Put/seqid=0 2024-11-25T17:08:07,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741877_1053 (size=12561) 2024-11-25T17:08:07,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:07,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:08:07,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:07,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:07,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:07,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:07,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:07,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:07,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/8ecc3734e1594558840ea266c6ee0327 is 50, key is test_row_0/A:col10/1732554487210/Put/seqid=0 2024-11-25T17:08:07,224 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/e76b672dd37443a4ab5b2e26af0b0578 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e76b672dd37443a4ab5b2e26af0b0578 2024-11-25T17:08:07,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554547229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554547231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554547231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554547234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,247 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into e76b672dd37443a4ab5b2e26af0b0578(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:07,247 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:07,247 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=13, startTime=1732554487026; duration=0sec 2024-11-25T17:08:07,248 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:07,248 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:07,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741878_1054 (size=14541) 2024-11-25T17:08:07,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/8ecc3734e1594558840ea266c6ee0327 2024-11-25T17:08:07,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-25T17:08:07,268 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-25T17:08:07,272 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:07,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-25T17:08:07,274 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:07,275 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:07,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:07,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-25T17:08:07,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/2324b6abee6047009d52d44b0bde80ea is 50, key is test_row_0/B:col10/1732554487210/Put/seqid=0 2024-11-25T17:08:07,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741879_1055 (size=12151) 2024-11-25T17:08:07,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/2324b6abee6047009d52d44b0bde80ea 2024-11-25T17:08:07,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/3e111e18476844d98cc6ee32c360aebd is 50, key is test_row_0/C:col10/1732554487210/Put/seqid=0 2024-11-25T17:08:07,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554547337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554547337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554547336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554547346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741880_1056 (size=12151) 2024-11-25T17:08:07,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/3e111e18476844d98cc6ee32c360aebd 2024-11-25T17:08:07,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-25T17:08:07,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/8ecc3734e1594558840ea266c6ee0327 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8ecc3734e1594558840ea266c6ee0327 2024-11-25T17:08:07,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8ecc3734e1594558840ea266c6ee0327, entries=200, sequenceid=199, filesize=14.2 K 2024-11-25T17:08:07,396 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-25T17:08:07,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/2324b6abee6047009d52d44b0bde80ea as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2324b6abee6047009d52d44b0bde80ea 2024-11-25T17:08:07,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2324b6abee6047009d52d44b0bde80ea, entries=150, sequenceid=199, filesize=11.9 K 2024-11-25T17:08:07,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/3e111e18476844d98cc6ee32c360aebd as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3e111e18476844d98cc6ee32c360aebd 2024-11-25T17:08:07,430 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:07,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-25T17:08:07,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3e111e18476844d98cc6ee32c360aebd, entries=150, sequenceid=199, filesize=11.9 K 2024-11-25T17:08:07,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:07,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 140432b4069c8ca485d8f3971c9e31fe in 229ms, sequenceid=199, compaction requested=false 2024-11-25T17:08:07,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:07,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:07,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:08:07,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:07,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:07,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:07,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:07,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:07,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:07,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b28cb8bde25a42c280138f253699d2a8 is 50, 
key is test_row_0/A:col10/1732554487229/Put/seqid=0 2024-11-25T17:08:07,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741881_1057 (size=14541) 2024-11-25T17:08:07,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b28cb8bde25a42c280138f253699d2a8 2024-11-25T17:08:07,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-25T17:08:07,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ecec7d75de3f47ca914b84b7adfd60c3 is 50, key is test_row_0/B:col10/1732554487229/Put/seqid=0 2024-11-25T17:08:07,588 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:07,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-25T17:08:07,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:07,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,589 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:07,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741882_1058 (size=12151) 2024-11-25T17:08:07,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554547625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554547629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554547631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554547636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554547656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,658 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., hostname=6579369734b6,41865,1732554474464, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:08:07,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554547735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554547736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554547736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,742 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:07,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-25T17:08:07,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:07,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554547742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-25T17:08:07,897 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:07,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-25T17:08:07,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:07,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:07,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:07,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:07,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554547939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554547940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554547944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:07,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554547948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:07,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ecec7d75de3f47ca914b84b7adfd60c3 2024-11-25T17:08:08,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/d142742e12a04a0c95f21fafd07455f1 is 50, key is test_row_0/C:col10/1732554487229/Put/seqid=0 2024-11-25T17:08:08,052 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:08,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-25T17:08:08,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:08,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:08,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:08,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:08,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:08,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741883_1059 (size=12151) 2024-11-25T17:08:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:08,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/d142742e12a04a0c95f21fafd07455f1 2024-11-25T17:08:08,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b28cb8bde25a42c280138f253699d2a8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b28cb8bde25a42c280138f253699d2a8 2024-11-25T17:08:08,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b28cb8bde25a42c280138f253699d2a8, entries=200, sequenceid=215, filesize=14.2 K 2024-11-25T17:08:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ecec7d75de3f47ca914b84b7adfd60c3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ecec7d75de3f47ca914b84b7adfd60c3 2024-11-25T17:08:08,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ecec7d75de3f47ca914b84b7adfd60c3, entries=150, 
sequenceid=215, filesize=11.9 K 2024-11-25T17:08:08,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/d142742e12a04a0c95f21fafd07455f1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/d142742e12a04a0c95f21fafd07455f1 2024-11-25T17:08:08,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/d142742e12a04a0c95f21fafd07455f1, entries=150, sequenceid=215, filesize=11.9 K 2024-11-25T17:08:08,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 140432b4069c8ca485d8f3971c9e31fe in 568ms, sequenceid=215, compaction requested=true 2024-11-25T17:08:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:08,118 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:08,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:08,118 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:08,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:08,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:08,120 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:08,120 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:08,120 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] 
regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:08,120 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:08,120 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:08,120 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:08,120 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/205708f2209f458cb0dc65cbc93934f4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8ecc3734e1594558840ea266c6ee0327, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b28cb8bde25a42c280138f253699d2a8] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=40.7 K 2024-11-25T17:08:08,120 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/815a08e092b34601bcf917602482f2eb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2324b6abee6047009d52d44b0bde80ea, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ecec7d75de3f47ca914b84b7adfd60c3] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.0 K 2024-11-25T17:08:08,121 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 815a08e092b34601bcf917602482f2eb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554486191 2024-11-25T17:08:08,122 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 205708f2209f458cb0dc65cbc93934f4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554486191 2024-11-25T17:08:08,122 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2324b6abee6047009d52d44b0bde80ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732554486890 2024-11-25T17:08:08,123 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ecc3734e1594558840ea266c6ee0327, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732554486890 
2024-11-25T17:08:08,123 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ecec7d75de3f47ca914b84b7adfd60c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732554487229 2024-11-25T17:08:08,124 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b28cb8bde25a42c280138f253699d2a8, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732554487229 2024-11-25T17:08:08,141 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#45 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:08,141 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/5a0e34c4165546aebc0605ba36ba5ef7 is 50, key is test_row_0/B:col10/1732554487229/Put/seqid=0 2024-11-25T17:08:08,151 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#46 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:08,153 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/bffb98e22fb5411c86d36d7fe828c4c3 is 50, key is test_row_0/A:col10/1732554487229/Put/seqid=0 2024-11-25T17:08:08,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741884_1060 (size=12663) 2024-11-25T17:08:08,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741885_1061 (size=12663) 2024-11-25T17:08:08,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:08,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-25T17:08:08,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:08,210 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:08:08,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:08,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:08,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:08,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:08,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:08,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:08,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f214be0bae424d82a678ba0882e23de7 is 50, key is test_row_0/A:col10/1732554487630/Put/seqid=0 2024-11-25T17:08:08,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:08,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554548269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741886_1062 (size=12151) 2024-11-25T17:08:08,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554548275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,283 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f214be0bae424d82a678ba0882e23de7 2024-11-25T17:08:08,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554548285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554548285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/a4124726d03741d4b056067a3ea2cea5 is 50, key is test_row_0/B:col10/1732554487630/Put/seqid=0 2024-11-25T17:08:08,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741887_1063 (size=12151) 2024-11-25T17:08:08,346 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/a4124726d03741d4b056067a3ea2cea5 2024-11-25T17:08:08,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9fd710de6430470ba893eab5b30d9064 is 50, key is test_row_0/C:col10/1732554487630/Put/seqid=0 2024-11-25T17:08:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-25T17:08:08,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741888_1064 (size=12151) 2024-11-25T17:08:08,384 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9fd710de6430470ba893eab5b30d9064 2024-11-25T17:08:08,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554548380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554548385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554548388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554548389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f214be0bae424d82a678ba0882e23de7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f214be0bae424d82a678ba0882e23de7 2024-11-25T17:08:08,406 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f214be0bae424d82a678ba0882e23de7, entries=150, sequenceid=237, filesize=11.9 K 2024-11-25T17:08:08,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/a4124726d03741d4b056067a3ea2cea5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a4124726d03741d4b056067a3ea2cea5 2024-11-25T17:08:08,416 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a4124726d03741d4b056067a3ea2cea5, entries=150, sequenceid=237, filesize=11.9 K 2024-11-25T17:08:08,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9fd710de6430470ba893eab5b30d9064 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9fd710de6430470ba893eab5b30d9064 2024-11-25T17:08:08,444 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9fd710de6430470ba893eab5b30d9064, entries=150, sequenceid=237, filesize=11.9 K 2024-11-25T17:08:08,446 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 140432b4069c8ca485d8f3971c9e31fe in 236ms, sequenceid=237, compaction requested=true 2024-11-25T17:08:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-25T17:08:08,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-25T17:08:08,453 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-25T17:08:08,453 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1740 sec 2024-11-25T17:08:08,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.1850 sec 2024-11-25T17:08:08,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-25T17:08:08,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:08,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:08,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:08,592 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/5a0e34c4165546aebc0605ba36ba5ef7 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/5a0e34c4165546aebc0605ba36ba5ef7 2024-11-25T17:08:08,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:08,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:08,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:08,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:08,593 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/bffb98e22fb5411c86d36d7fe828c4c3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bffb98e22fb5411c86d36d7fe828c4c3 2024-11-25T17:08:08,603 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into 5a0e34c4165546aebc0605ba36ba5ef7(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:08,603 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:08,603 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554488118; duration=0sec 2024-11-25T17:08:08,603 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:08,603 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:08,604 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:08,605 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:08,606 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:08,606 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:08,606 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e76b672dd37443a4ab5b2e26af0b0578, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3e111e18476844d98cc6ee32c360aebd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/d142742e12a04a0c95f21fafd07455f1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9fd710de6430470ba893eab5b30d9064] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=47.9 K 2024-11-25T17:08:08,607 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into bffb98e22fb5411c86d36d7fe828c4c3(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:08,607 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:08,607 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554488118; duration=0sec 2024-11-25T17:08:08,607 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:08,607 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:08,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d95756b6bf5b4933b2747e611708fe4b is 50, key is test_row_0/A:col10/1732554488273/Put/seqid=0 2024-11-25T17:08:08,615 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e76b672dd37443a4ab5b2e26af0b0578, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554486191 2024-11-25T17:08:08,616 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e111e18476844d98cc6ee32c360aebd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732554486890 2024-11-25T17:08:08,616 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d142742e12a04a0c95f21fafd07455f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732554487229 2024-11-25T17:08:08,617 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9fd710de6430470ba893eab5b30d9064, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732554487618 2024-11-25T17:08:08,644 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:08,645 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/037a3976f9974495ba0075e3a4480e8e is 50, key is test_row_0/C:col10/1732554487630/Put/seqid=0 2024-11-25T17:08:08,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554548640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554548641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554548644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554548645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741889_1065 (size=12151) 2024-11-25T17:08:08,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d95756b6bf5b4933b2747e611708fe4b 2024-11-25T17:08:08,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741890_1066 (size=12697) 2024-11-25T17:08:08,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/a6955de4ab7940228432460f1c117684 is 50, key is test_row_0/B:col10/1732554488273/Put/seqid=0 2024-11-25T17:08:08,698 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/037a3976f9974495ba0075e3a4480e8e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/037a3976f9974495ba0075e3a4480e8e 2024-11-25T17:08:08,732 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into 037a3976f9974495ba0075e3a4480e8e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
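Note: the repeated WARN/DEBUG pairs above are the write-blocking path: HRegion.checkResources() rejects each Mutate with RegionTooBusyException while the region's memstore is above its blocking limit (reported here as 512.0 K), and the caller is expected to back off and retry once the in-flight flush drains the memstore. Below is a minimal, illustrative sketch of that client-side contract, assuming a stock HBase 2.x client API; the table, family, row and qualifier names are taken from the log, while the retry count, backoff values and class name are made up for illustration (the standard client already performs this kind of retry internally).

```java
// Illustrative only: back off and retry when a put is rejected with
// RegionTooBusyException. Table/family/row names come from the log above;
// the retry loop and backoff values are arbitrary.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // the stock client also retries internally
          break;            // write accepted
        } catch (RegionTooBusyException busy) {
          // Memstore above the blocking limit; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff
        }
      }
    }
  }
}
```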
2024-11-25T17:08:08,732 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:08,732 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=12, startTime=1732554488118; duration=0sec 2024-11-25T17:08:08,732 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:08,732 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:08,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741891_1067 (size=12151) 2024-11-25T17:08:08,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554548747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554548749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554548752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554548753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554548950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554548955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554548956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:08,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:08,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554548957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/a6955de4ab7940228432460f1c117684 2024-11-25T17:08:09,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/c945ce97f28e43d7a82f2f02d1a1889a is 50, key is test_row_0/C:col10/1732554488273/Put/seqid=0 2024-11-25T17:08:09,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741892_1068 (size=12151) 2024-11-25T17:08:09,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554549260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554549260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554549261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554549261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-25T17:08:09,384 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-25T17:08:09,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:09,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-25T17:08:09,388 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:09,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-25T17:08:09,390 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:09,390 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:09,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-25T17:08:09,542 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:09,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-25T17:08:09,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
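Note: the pid=18/pid=20 entries above are the master-side view of client-requested table flushes: each request is stored as a FlushTableProcedure, which spawns a per-region FlushRegionProcedure dispatched to the region server, while the client polls "Checking to see if procedure is done". Below is a minimal, illustrative sketch of issuing such a flush, assuming the stock HBase 2.x Admin API; only the table name is taken from the log, and the class name is made up.

```java
// Illustrative only: request the kind of table flush seen above
// (FlushTableProcedure for TestAcidGuarantees) through the Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure completes; while waiting,
      // the master logs "Checking to see if procedure is done pid=..." entries.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```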
2024-11-25T17:08:09,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:09,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:09,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:09,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:09,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:09,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/c945ce97f28e43d7a82f2f02d1a1889a 2024-11-25T17:08:09,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d95756b6bf5b4933b2747e611708fe4b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d95756b6bf5b4933b2747e611708fe4b 2024-11-25T17:08:09,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d95756b6bf5b4933b2747e611708fe4b, entries=150, sequenceid=252, filesize=11.9 K 2024-11-25T17:08:09,621 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/a6955de4ab7940228432460f1c117684 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a6955de4ab7940228432460f1c117684 2024-11-25T17:08:09,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a6955de4ab7940228432460f1c117684, entries=150, sequenceid=252, filesize=11.9 K 2024-11-25T17:08:09,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/c945ce97f28e43d7a82f2f02d1a1889a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c945ce97f28e43d7a82f2f02d1a1889a 2024-11-25T17:08:09,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c945ce97f28e43d7a82f2f02d1a1889a, entries=150, sequenceid=252, filesize=11.9 K 2024-11-25T17:08:09,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 140432b4069c8ca485d8f3971c9e31fe in 1062ms, sequenceid=252, compaction requested=true 2024-11-25T17:08:09,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:09,660 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:09,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-25T17:08:09,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:09,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:09,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:09,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:09,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:09,662 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:09,662 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:09,662 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:09,662 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:09,662 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bffb98e22fb5411c86d36d7fe828c4c3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f214be0bae424d82a678ba0882e23de7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d95756b6bf5b4933b2747e611708fe4b] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.1 K 2024-11-25T17:08:09,664 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:09,664 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:09,664 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
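Note: the behavior in this excerpt is shaped by a handful of server-side settings: a store becomes eligible for minor compaction once enough files accumulate ("Selecting compaction from 3 store files" here, and "Need 3 to initiate" further down), writes are blocked once a region memstore exceeds its flush size times the block multiplier (the 512.0 K limit above, far below production defaults, so presumably lowered by the test), and compaction I/O is throttled by the PressureAwareThroughputController (total limit 50.00 MB/second in these entries). Below is a hedged sketch of the corresponding configuration keys set programmatically; the values shown are illustrative defaults, not the values this test actually used.

```java
// Illustrative only: configuration keys most relevant to the memstore blocking
// and compaction selection seen in this log. Values are stock-looking examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreCompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush size; a flush is requested at this size.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are blocked (RegionTooBusyException) once the memstore reaches
    // flush.size * block.multiplier -- the "Over memstore limit" above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Minimum number of store files before a minor compaction is initiated
    // (legacy name: hbase.hstore.compactionThreshold); matches "Need 3 to initiate".
    conf.setInt("hbase.hstore.compaction.min", 3);
    System.out.println("flush size = "
        + conf.getLong("hbase.hregion.memstore.flush.size", -1));
  }
}
```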
2024-11-25T17:08:09,664 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/5a0e34c4165546aebc0605ba36ba5ef7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a4124726d03741d4b056067a3ea2cea5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a6955de4ab7940228432460f1c117684] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.1 K 2024-11-25T17:08:09,665 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting bffb98e22fb5411c86d36d7fe828c4c3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732554487229 2024-11-25T17:08:09,665 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a0e34c4165546aebc0605ba36ba5ef7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732554487229 2024-11-25T17:08:09,666 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f214be0bae424d82a678ba0882e23de7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732554487618 2024-11-25T17:08:09,666 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4124726d03741d4b056067a3ea2cea5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732554487618 2024-11-25T17:08:09,666 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d95756b6bf5b4933b2747e611708fe4b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554488266 2024-11-25T17:08:09,666 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6955de4ab7940228432460f1c117684, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554488266 2024-11-25T17:08:09,687 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#54 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:09,687 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#55 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:09,688 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/b02c9b3399214fe0bcd9aa2506390636 is 50, key is test_row_0/B:col10/1732554488273/Put/seqid=0 2024-11-25T17:08:09,689 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b9bdf47ab97048f7ad4bf10a65cfff5b is 50, key is test_row_0/A:col10/1732554488273/Put/seqid=0 2024-11-25T17:08:09,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-25T17:08:09,698 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:09,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-25T17:08:09,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:09,699 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-25T17:08:09,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:09,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:09,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:09,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:09,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:09,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:09,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741893_1069 (size=12765) 2024-11-25T17:08:09,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741894_1070 (size=12765) 2024-11-25T17:08:09,717 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/35d8bf619f204459a517d180a56c0dfd is 50, key is test_row_0/A:col10/1732554488641/Put/seqid=0 2024-11-25T17:08:09,726 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/b02c9b3399214fe0bcd9aa2506390636 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b02c9b3399214fe0bcd9aa2506390636 2024-11-25T17:08:09,736 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into b02c9b3399214fe0bcd9aa2506390636(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:09,736 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:09,737 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554489661; duration=0sec 2024-11-25T17:08:09,737 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:09,737 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:09,737 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-25T17:08:09,739 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-25T17:08:09,740 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-25T17:08:09,740 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
because compaction request was cancelled 2024-11-25T17:08:09,740 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:09,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741895_1071 (size=12301) 2024-11-25T17:08:09,751 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/35d8bf619f204459a517d180a56c0dfd 2024-11-25T17:08:09,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:09,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:09,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/c9241fc3a2e0401ca9e156f8ef32e803 is 50, key is test_row_0/B:col10/1732554488641/Put/seqid=0 2024-11-25T17:08:09,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741896_1072 (size=12301) 2024-11-25T17:08:09,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554549785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554549785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554549787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554549788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554549890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554549894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554549894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554549898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:09,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-25T17:08:10,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554550095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554550097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554550097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554550100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,126 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b9bdf47ab97048f7ad4bf10a65cfff5b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b9bdf47ab97048f7ad4bf10a65cfff5b 2024-11-25T17:08:10,141 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into b9bdf47ab97048f7ad4bf10a65cfff5b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
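Illustrative aside, not part of the captured output: every RegionTooBusyException above reports the same blocking threshold, "Over memstore limit=512.0 K", while flushes and compactions of region 140432b4069c8ca485d8f3971c9e31fe run in the background. The short sketch below assumes that this threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the concrete values are hypothetical, chosen only so the product matches the 512 KB figure in the log, since the settings actually used by this test run are not shown here.

    // Minimal sketch (assumed relationship, hypothetical values): the blocking
    // threshold reported as "Over memstore limit=512.0 K" is taken here to be
    // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical settings chosen so the product is 512 KB, matching the log.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // 128 KB
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
            // Writes to a region whose memstore exceeds this size are rejected with
            // RegionTooBusyException until a flush brings the memstore back under it,
            // which is the pattern visible in the surrounding WARN/DEBUG records.
            System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
        }
    }

Under that assumption, raising either setting trades higher memory pressure for fewer RegionTooBusyException rejections during write bursts like the one recorded above.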
2024-11-25T17:08:10,143 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:10,143 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554489660; duration=0sec 2024-11-25T17:08:10,143 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:10,143 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:10,179 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/c9241fc3a2e0401ca9e156f8ef32e803 2024-11-25T17:08:10,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/b72f05efbc574ccb887470fbc7217486 is 50, key is test_row_0/C:col10/1732554488641/Put/seqid=0 2024-11-25T17:08:10,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741897_1073 (size=12301) 2024-11-25T17:08:10,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554550398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554550400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554550405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554550408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-25T17:08:10,613 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/b72f05efbc574ccb887470fbc7217486 2024-11-25T17:08:10,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/35d8bf619f204459a517d180a56c0dfd as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/35d8bf619f204459a517d180a56c0dfd 2024-11-25T17:08:10,652 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/35d8bf619f204459a517d180a56c0dfd, entries=150, sequenceid=276, filesize=12.0 K 2024-11-25T17:08:10,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/c9241fc3a2e0401ca9e156f8ef32e803 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c9241fc3a2e0401ca9e156f8ef32e803 2024-11-25T17:08:10,679 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c9241fc3a2e0401ca9e156f8ef32e803, entries=150, sequenceid=276, filesize=12.0 K 2024-11-25T17:08:10,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/b72f05efbc574ccb887470fbc7217486 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/b72f05efbc574ccb887470fbc7217486 2024-11-25T17:08:10,691 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/b72f05efbc574ccb887470fbc7217486, entries=150, sequenceid=276, filesize=12.0 K 2024-11-25T17:08:10,692 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 140432b4069c8ca485d8f3971c9e31fe in 993ms, sequenceid=276, compaction requested=true 2024-11-25T17:08:10,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:10,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:10,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-25T17:08:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-25T17:08:10,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-25T17:08:10,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3060 sec 2024-11-25T17:08:10,702 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.3140 sec 2024-11-25T17:08:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:10,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:08:10,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:10,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:10,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:10,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:10,908 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:10,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:10,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/2a5e5de73f444f86ad815b0f720c41ac is 50, key is test_row_0/A:col10/1732554490905/Put/seqid=0 2024-11-25T17:08:10,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741898_1074 (size=12301) 2024-11-25T17:08:10,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554550936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554550938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554550938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:10,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554550940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:11,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554551045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554551045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:11,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554551046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:11,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554551046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:11,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554551250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:11,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554551250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:11,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554551250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:11,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:11,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554551252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:11,326 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/2a5e5de73f444f86ad815b0f720c41ac
2024-11-25T17:08:11,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/b9e0e207118b4f5d86e85ada2e97191d is 50, key is test_row_0/B:col10/1732554490905/Put/seqid=0
2024-11-25T17:08:11,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741899_1075 (size=12301)
2024-11-25T17:08:11,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/b9e0e207118b4f5d86e85ada2e97191d
2024-11-25T17:08:11,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/8d4f18697dd548d2ad048380d5968da4 is 50, key is test_row_0/C:col10/1732554490905/Put/seqid=0
2024-11-25T17:08:11,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741900_1076 (size=12301)
2024-11-25T17:08:11,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-11-25T17:08:11,495 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed
2024-11-25T17:08:11,496 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-25T17:08:11,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees
2024-11-25T17:08:11,502 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-25T17:08:11,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-25T17:08:11,503 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-25T17:08:11,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-25T17:08:11,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:11,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554551554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:11,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:11,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554551556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:11,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554551557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:11,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554551558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:11,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-25T17:08:11,656 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:11,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-25T17:08:11,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:11,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing
2024-11-25T17:08:11,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:11,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23
java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:11,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:11,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:11,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:11,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40224 deadline: 1732554551669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:11,673 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
 , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., hostname=6579369734b6,41865,1732554474464, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
    at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499)
    at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480)
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110)
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100)
    at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280)
    at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265)
    at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133)
    at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104)
    at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565)
    at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169)
    at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149)
    at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123)
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425)
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116)
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131)
    at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457)
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125)
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412)
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346)
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412)
    at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412)
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440)
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868)
    at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166)
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788)
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724)
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650)
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562)
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-25T17:08:11,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-25T17:08:11,810 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:11,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-25T17:08:11,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:11,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing
2024-11-25T17:08:11,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:11,817 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23
java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:11,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:11,833 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/8d4f18697dd548d2ad048380d5968da4
2024-11-25T17:08:11,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/2a5e5de73f444f86ad815b0f720c41ac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2a5e5de73f444f86ad815b0f720c41ac
2024-11-25T17:08:11,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2a5e5de73f444f86ad815b0f720c41ac, entries=150, sequenceid=294, filesize=12.0 K
2024-11-25T17:08:11,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/b9e0e207118b4f5d86e85ada2e97191d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b9e0e207118b4f5d86e85ada2e97191d
2024-11-25T17:08:11,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b9e0e207118b4f5d86e85ada2e97191d, entries=150, sequenceid=294, filesize=12.0 K
2024-11-25T17:08:11,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/8d4f18697dd548d2ad048380d5968da4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/8d4f18697dd548d2ad048380d5968da4
2024-11-25T17:08:11,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/8d4f18697dd548d2ad048380d5968da4, entries=150, sequenceid=294, filesize=12.0 K
2024-11-25T17:08:11,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 140432b4069c8ca485d8f3971c9e31fe in 988ms, sequenceid=294, compaction requested=true
2024-11-25T17:08:11,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:11,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1
2024-11-25T17:08:11,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T17:08:11,895 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T17:08:11,895 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T17:08:11,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2
2024-11-25T17:08:11,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T17:08:11,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3
2024-11-25T17:08:11,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-25T17:08:11,898 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T17:08:11,898 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files)
2024-11-25T17:08:11,898 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:11,898 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b9bdf47ab97048f7ad4bf10a65cfff5b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/35d8bf619f204459a517d180a56c0dfd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2a5e5de73f444f86ad815b0f720c41ac] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.5 K
2024-11-25T17:08:11,898 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T17:08:11,898 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files)
2024-11-25T17:08:11,898 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:11,899 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9bdf47ab97048f7ad4bf10a65cfff5b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554488266
2024-11-25T17:08:11,899 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b02c9b3399214fe0bcd9aa2506390636, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c9241fc3a2e0401ca9e156f8ef32e803, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b9e0e207118b4f5d86e85ada2e97191d] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.5 K
2024-11-25T17:08:11,899 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35d8bf619f204459a517d180a56c0dfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554488638
2024-11-25T17:08:11,899 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b02c9b3399214fe0bcd9aa2506390636, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554488266
2024-11-25T17:08:11,900 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a5e5de73f444f86ad815b0f720c41ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554489783
2024-11-25T17:08:11,900 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c9241fc3a2e0401ca9e156f8ef32e803, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554488638
2024-11-25T17:08:11,900 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b9e0e207118b4f5d86e85ada2e97191d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554489783
2024-11-25T17:08:11,933 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#62 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T17:08:11,938 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#63 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T17:08:11,938 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/d81aeaa2a0454c8390cb40b31875de01 is 50, key is test_row_0/B:col10/1732554490905/Put/seqid=0
2024-11-25T17:08:11,939 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d381046c34d9467d8a8cacb96af257e8 is 50, key is test_row_0/A:col10/1732554490905/Put/seqid=0
2024-11-25T17:08:11,971 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:11,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-25T17:08:11,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:11,979 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB
2024-11-25T17:08:11,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A
2024-11-25T17:08:11,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:11,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B
2024-11-25T17:08:11,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:11,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C
2024-11-25T17:08:11,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:11,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741902_1078 (size=13017)
2024-11-25T17:08:11,995 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d381046c34d9467d8a8cacb96af257e8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d381046c34d9467d8a8cacb96af257e8
2024-11-25T17:08:12,004 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into d381046c34d9467d8a8cacb96af257e8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T17:08:12,004 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:12,004 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554491895; duration=0sec
2024-11-25T17:08:12,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741901_1077 (size=13017)
2024-11-25T17:08:12,004 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-25T17:08:12,004 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A
2024-11-25T17:08:12,007 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-25T17:08:12,009 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-25T17:08:12,009 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files)
2024-11-25T17:08:12,009 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:12,009 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/037a3976f9974495ba0075e3a4480e8e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c945ce97f28e43d7a82f2f02d1a1889a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/b72f05efbc574ccb887470fbc7217486, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/8d4f18697dd548d2ad048380d5968da4] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=48.3 K
2024-11-25T17:08:12,011 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 037a3976f9974495ba0075e3a4480e8e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732554487618
2024-11-25T17:08:12,012 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c945ce97f28e43d7a82f2f02d1a1889a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554488266
2024-11-25T17:08:12,013 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b72f05efbc574ccb887470fbc7217486, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554488638
2024-11-25T17:08:12,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/5b7745e0047c4d58bd2ab822d5174b6c is 50, key is test_row_0/A:col10/1732554490938/Put/seqid=0
2024-11-25T17:08:12,015 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d4f18697dd548d2ad048380d5968da4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554489783
2024-11-25T17:08:12,039 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#65 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T17:08:12,040 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/2d6de6484561424191df0c9e3c13fdf8 is 50, key is test_row_0/C:col10/1732554490905/Put/seqid=0
2024-11-25T17:08:12,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741903_1079 (size=12301)
2024-11-25T17:08:12,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe
2024-11-25T17:08:12,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing
2024-11-25T17:08:12,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741904_1080 (size=12983)
2024-11-25T17:08:12,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554552085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:12,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554552086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554552088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554552090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-25T17:08:12,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554552192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554552192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554552192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554552194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554552396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554552397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554552397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554552405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,427 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/d81aeaa2a0454c8390cb40b31875de01 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/d81aeaa2a0454c8390cb40b31875de01 2024-11-25T17:08:12,446 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/5b7745e0047c4d58bd2ab822d5174b6c 2024-11-25T17:08:12,467 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into d81aeaa2a0454c8390cb40b31875de01(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:12,467 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:12,467 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554491895; duration=0sec
2024-11-25T17:08:12,467 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T17:08:12,467 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B
2024-11-25T17:08:12,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/9bf16ad1038d4dfd992fdec1c9ca44ce is 50, key is test_row_0/B:col10/1732554490938/Put/seqid=0
2024-11-25T17:08:12,481 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/2d6de6484561424191df0c9e3c13fdf8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d6de6484561424191df0c9e3c13fdf8
2024-11-25T17:08:12,513 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into 2d6de6484561424191df0c9e3c13fdf8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
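[Editor's note] The RegionTooBusyException warnings that dominate this stretch of the log are the region server rejecting writes while the region's memstore is above its blocking limit (512.0 K here); the expected client behaviour is to back off and retry. Below is a minimal, hypothetical sketch of such handling, assuming the exception actually surfaces to the caller; in practice the HBase client's built-in retry policy may absorb or wrap it. Row, family and qualifier names are taken from the log, while the value, attempt limit and backoff schedule are invented for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);           // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                // give up after a few attempts
          }
          Thread.sleep(backoffMs);  // back off and let flushes/compactions catch up
          backoffMs *= 2;           // simple exponential backoff
        }
      }
    }
  }
}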
2024-11-25T17:08:12,513 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:12,513 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=12, startTime=1732554491896; duration=0sec 2024-11-25T17:08:12,514 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:12,514 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:12,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741905_1081 (size=12301) 2024-11-25T17:08:12,540 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/9bf16ad1038d4dfd992fdec1c9ca44ce 2024-11-25T17:08:12,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a70bb0b1783c4be893235cab49e508d4 is 50, key is test_row_0/C:col10/1732554490938/Put/seqid=0 2024-11-25T17:08:12,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741906_1082 (size=12301) 2024-11-25T17:08:12,571 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a70bb0b1783c4be893235cab49e508d4 2024-11-25T17:08:12,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/5b7745e0047c4d58bd2ab822d5174b6c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5b7745e0047c4d58bd2ab822d5174b6c 2024-11-25T17:08:12,591 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5b7745e0047c4d58bd2ab822d5174b6c, entries=150, sequenceid=314, filesize=12.0 K 2024-11-25T17:08:12,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/9bf16ad1038d4dfd992fdec1c9ca44ce as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/9bf16ad1038d4dfd992fdec1c9ca44ce 2024-11-25T17:08:12,599 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/9bf16ad1038d4dfd992fdec1c9ca44ce, entries=150, sequenceid=314, filesize=12.0 K 2024-11-25T17:08:12,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a70bb0b1783c4be893235cab49e508d4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a70bb0b1783c4be893235cab49e508d4 2024-11-25T17:08:12,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-25T17:08:12,615 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a70bb0b1783c4be893235cab49e508d4, entries=150, sequenceid=314, filesize=12.0 K 2024-11-25T17:08:12,616 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 140432b4069c8ca485d8f3971c9e31fe in 637ms, sequenceid=314, compaction requested=false 2024-11-25T17:08:12,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:12,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
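[Editor's note] The flush that completes above is driven server-side by FlushRegionProcedure/FlushTableProcedure (pid=23/pid=22 in the entries that follow). An equivalent flush can be requested from a client via Admin.flush. The sketch below is hypothetical and also names the two configuration properties whose product gives the memstore blocking limit reported in the RegionTooBusyException messages; the 128 KB flush size and 4x multiplier are assumed values chosen to match the 512.0 K limit seen here, and they only take effect in the region server's own configuration, not when set on a client.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestAcidGuarantees {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style sizing: flush size * block multiplier = 512 K blocking limit.
    // These properties are normally read by the region server from its hbase-site.xml;
    // they are set here only to document the knobs, not to reconfigure a live server.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Request a flush of every region of the table, the same kind of work the
      // server-side flush procedures in this log perform.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}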
2024-11-25T17:08:12,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23
2024-11-25T17:08:12,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=23
2024-11-25T17:08:12,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22
2024-11-25T17:08:12,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1150 sec
2024-11-25T17:08:12,624 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.1260 sec
2024-11-25T17:08:12,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-11-25T17:08:12,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A
2024-11-25T17:08:12,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B
2024-11-25T17:08:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C
2024-11-25T17:08:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:12,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe
2024-11-25T17:08:12,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/75bb5b3a46d0482a9a7b781b046063f6 is 50, key is test_row_0/A:col10/1732554492083/Put/seqid=0
2024-11-25T17:08:12,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741907_1083 (size=14741)
2024-11-25T17:08:12,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554552725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554552725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554552726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554552729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554552830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554552831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554552831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:12,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554552837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554553034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554553038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554553040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554553042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/75bb5b3a46d0482a9a7b781b046063f6 2024-11-25T17:08:13,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8c81e843f4e241b3aa9600841965ff82 is 50, key is test_row_0/B:col10/1732554492083/Put/seqid=0 2024-11-25T17:08:13,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741908_1084 (size=12301) 2024-11-25T17:08:13,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8c81e843f4e241b3aa9600841965ff82 2024-11-25T17:08:13,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a8f9e31c410a435caab32fd9c6b4a1ca is 50, key is test_row_0/C:col10/1732554492083/Put/seqid=0 2024-11-25T17:08:13,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741909_1085 (size=12301) 2024-11-25T17:08:13,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554553342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554553343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554553346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554553347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a8f9e31c410a435caab32fd9c6b4a1ca 2024-11-25T17:08:13,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-25T17:08:13,609 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-25T17:08:13,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:13,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/75bb5b3a46d0482a9a7b781b046063f6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/75bb5b3a46d0482a9a7b781b046063f6 2024-11-25T17:08:13,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-25T17:08:13,614 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:13,614 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:13,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:13,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-25T17:08:13,622 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/75bb5b3a46d0482a9a7b781b046063f6, entries=200, sequenceid=334, filesize=14.4 K 2024-11-25T17:08:13,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8c81e843f4e241b3aa9600841965ff82 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8c81e843f4e241b3aa9600841965ff82 2024-11-25T17:08:13,632 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8c81e843f4e241b3aa9600841965ff82, entries=150, sequenceid=334, filesize=12.0 K 2024-11-25T17:08:13,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a8f9e31c410a435caab32fd9c6b4a1ca as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a8f9e31c410a435caab32fd9c6b4a1ca 2024-11-25T17:08:13,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a8f9e31c410a435caab32fd9c6b4a1ca, entries=150, sequenceid=334, filesize=12.0 K 2024-11-25T17:08:13,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 140432b4069c8ca485d8f3971c9e31fe in 948ms, sequenceid=334, compaction requested=true 2024-11-25T17:08:13,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:13,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:13,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:13,652 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:13,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:13,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:13,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-25T17:08:13,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:13,653 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:13,656 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40059 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:13,656 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:13,656 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:13,656 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d381046c34d9467d8a8cacb96af257e8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5b7745e0047c4d58bd2ab822d5174b6c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/75bb5b3a46d0482a9a7b781b046063f6] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=39.1 K 2024-11-25T17:08:13,657 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:13,657 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:13,657 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:13,657 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/d81aeaa2a0454c8390cb40b31875de01, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/9bf16ad1038d4dfd992fdec1c9ca44ce, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8c81e843f4e241b3aa9600841965ff82] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.7 K 2024-11-25T17:08:13,658 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d81aeaa2a0454c8390cb40b31875de01, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554489783 2024-11-25T17:08:13,658 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting d381046c34d9467d8a8cacb96af257e8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554489783 2024-11-25T17:08:13,660 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bf16ad1038d4dfd992fdec1c9ca44ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732554490934 2024-11-25T17:08:13,660 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b7745e0047c4d58bd2ab822d5174b6c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732554490934 2024-11-25T17:08:13,660 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c81e843f4e241b3aa9600841965ff82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554492083 2024-11-25T17:08:13,661 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75bb5b3a46d0482a9a7b781b046063f6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554492083 2024-11-25T17:08:13,672 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#71 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:13,673 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/7dda291fb8b04cb48c8aff53a4e136fc is 50, key is test_row_0/A:col10/1732554492083/Put/seqid=0 2024-11-25T17:08:13,679 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#72 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:13,680 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/dcc2e8d2a3c240259b747bd342a19b3f is 50, key is test_row_0/B:col10/1732554492083/Put/seqid=0 2024-11-25T17:08:13,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741910_1086 (size=13119) 2024-11-25T17:08:13,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741911_1087 (size=13119) 2024-11-25T17:08:13,715 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/7dda291fb8b04cb48c8aff53a4e136fc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7dda291fb8b04cb48c8aff53a4e136fc 2024-11-25T17:08:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-25T17:08:13,719 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/dcc2e8d2a3c240259b747bd342a19b3f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/dcc2e8d2a3c240259b747bd342a19b3f 2024-11-25T17:08:13,728 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into 7dda291fb8b04cb48c8aff53a4e136fc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:13,728 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:13,728 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into dcc2e8d2a3c240259b747bd342a19b3f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:13,728 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554493652; duration=0sec 2024-11-25T17:08:13,728 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:13,728 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554493652; duration=0sec 2024-11-25T17:08:13,728 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:13,728 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:13,728 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:13,728 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:13,729 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:13,731 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:13,731 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:13,731 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:13,731 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d6de6484561424191df0c9e3c13fdf8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a70bb0b1783c4be893235cab49e508d4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a8f9e31c410a435caab32fd9c6b4a1ca] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.7 K 2024-11-25T17:08:13,732 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d6de6484561424191df0c9e3c13fdf8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554489783 2024-11-25T17:08:13,732 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a70bb0b1783c4be893235cab49e508d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732554490934 2024-11-25T17:08:13,733 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8f9e31c410a435caab32fd9c6b4a1ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554492083 2024-11-25T17:08:13,744 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#73 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:13,745 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/3631ad12e3e24a1db37fbdb35ae78a47 is 50, key is test_row_0/C:col10/1732554492083/Put/seqid=0 2024-11-25T17:08:13,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741912_1088 (size=13085) 2024-11-25T17:08:13,767 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:13,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-25T17:08:13,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:13,768 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-25T17:08:13,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:13,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:13,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:13,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:13,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:13,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:13,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/4e82485acd3049648a8875a980af18ea is 50, key is test_row_0/A:col10/1732554492720/Put/seqid=0 2024-11-25T17:08:13,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741913_1089 (size=12301) 2024-11-25T17:08:13,782 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/4e82485acd3049648a8875a980af18ea 2024-11-25T17:08:13,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/f796016cb6ac40f9a13763dfb96764cf is 50, key is test_row_0/B:col10/1732554492720/Put/seqid=0 2024-11-25T17:08:13,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741914_1090 (size=12301) 2024-11-25T17:08:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:13,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
as already flushing 2024-11-25T17:08:13,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554553868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554553869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554553872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554553873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-25T17:08:13,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554553972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554553976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554553976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:13,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:13,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554553978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:14,164 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/3631ad12e3e24a1db37fbdb35ae78a47 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3631ad12e3e24a1db37fbdb35ae78a47 2024-11-25T17:08:14,173 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into 3631ad12e3e24a1db37fbdb35ae78a47(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:14,173 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:14,173 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=13, startTime=1732554493652; duration=0sec 2024-11-25T17:08:14,173 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:14,174 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:14,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:14,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554554178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:14,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:14,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554554180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:14,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:14,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554554180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:14,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:14,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554554181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:14,200 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/f796016cb6ac40f9a13763dfb96764cf
2024-11-25T17:08:14,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/93763932a9f144c0a1799133c90eadfe is 50, key is test_row_0/C:col10/1732554492720/Put/seqid=0
2024-11-25T17:08:14,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24
2024-11-25T17:08:14,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741915_1091 (size=12301)
2024-11-25T17:08:14,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554554482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:14,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554554484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:14,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:14,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554554484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:14,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:14,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554554485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:14,620 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/93763932a9f144c0a1799133c90eadfe
2024-11-25T17:08:14,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/4e82485acd3049648a8875a980af18ea as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/4e82485acd3049648a8875a980af18ea
2024-11-25T17:08:14,641 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/4e82485acd3049648a8875a980af18ea, entries=150, sequenceid=355, filesize=12.0 K
2024-11-25T17:08:14,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/f796016cb6ac40f9a13763dfb96764cf as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f796016cb6ac40f9a13763dfb96764cf
2024-11-25T17:08:14,651 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f796016cb6ac40f9a13763dfb96764cf, entries=150, sequenceid=355, filesize=12.0 K
2024-11-25T17:08:14,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/93763932a9f144c0a1799133c90eadfe as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/93763932a9f144c0a1799133c90eadfe
2024-11-25T17:08:14,658 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/93763932a9f144c0a1799133c90eadfe, entries=150, sequenceid=355, filesize=12.0 K
2024-11-25T17:08:14,660 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 140432b4069c8ca485d8f3971c9e31fe in 892ms, sequenceid=355, compaction requested=false
2024-11-25T17:08:14,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:14,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:14,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25
2024-11-25T17:08:14,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=25
2024-11-25T17:08:14,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24
2024-11-25T17:08:14,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0470 sec
2024-11-25T17:08:14,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.0530 sec
2024-11-25T17:08:14,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24
2024-11-25T17:08:14,719 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed
2024-11-25T17:08:14,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-25T17:08:14,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees
2024-11-25T17:08:14,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-11-25T17:08:14,724 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-25T17:08:14,725 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-25T17:08:14,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-25T17:08:14,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-11-25T17:08:14,885 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:14,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27
2024-11-25T17:08:14,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:14,886 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-11-25T17:08:14,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A
2024-11-25T17:08:14,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:14,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B
2024-11-25T17:08:14,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:14,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C
2024-11-25T17:08:14,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:14,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b55b0a92753c416d9f17c836660ed3d9 is 50, key is test_row_0/A:col10/1732554493865/Put/seqid=0
2024-11-25T17:08:14,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741916_1092 (size=12301)
2024-11-25T17:08:14,921 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b55b0a92753c416d9f17c836660ed3d9
2024-11-25T17:08:14,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/08f624db275644ce8af56427b524eb6d is 50, key is test_row_0/B:col10/1732554493865/Put/seqid=0
2024-11-25T17:08:14,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741917_1093 (size=12301)
2024-11-25T17:08:14,949 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/08f624db275644ce8af56427b524eb6d
2024-11-25T17:08:14,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/81cc5eadc36e4cd5a6f97beb8f8d3941 is 50, key is test_row_0/C:col10/1732554493865/Put/seqid=0
2024-11-25T17:08:14,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741918_1094 (size=12301)
2024-11-25T17:08:14,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe
2024-11-25T17:08:14,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing
2024-11-25T17:08:15,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554555007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554555007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554555008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554555010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-11-25T17:08:15,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554555112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554555112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554555112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554555113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554555317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554555317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554555316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554555320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-11-25T17:08:15,368 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/81cc5eadc36e4cd5a6f97beb8f8d3941
2024-11-25T17:08:15,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/b55b0a92753c416d9f17c836660ed3d9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b55b0a92753c416d9f17c836660ed3d9
2024-11-25T17:08:15,411 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b55b0a92753c416d9f17c836660ed3d9, entries=150, sequenceid=373, filesize=12.0 K
2024-11-25T17:08:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/08f624db275644ce8af56427b524eb6d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/08f624db275644ce8af56427b524eb6d
2024-11-25T17:08:15,424 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/08f624db275644ce8af56427b524eb6d, entries=150, sequenceid=373, filesize=12.0 K
2024-11-25T17:08:15,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/81cc5eadc36e4cd5a6f97beb8f8d3941 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/81cc5eadc36e4cd5a6f97beb8f8d3941
2024-11-25T17:08:15,433 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/81cc5eadc36e4cd5a6f97beb8f8d3941, entries=150, sequenceid=373, filesize=12.0 K
2024-11-25T17:08:15,435 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 140432b4069c8ca485d8f3971c9e31fe in 549ms, sequenceid=373, compaction requested=true
2024-11-25T17:08:15,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:15,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:15,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27
2024-11-25T17:08:15,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=27
2024-11-25T17:08:15,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26
2024-11-25T17:08:15,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 716 msec
2024-11-25T17:08:15,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 726 msec
2024-11-25T17:08:15,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe
2024-11-25T17:08:15,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB
2024-11-25T17:08:15,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A
2024-11-25T17:08:15,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:15,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B
2024-11-25T17:08:15,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:15,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C
2024-11-25T17:08:15,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:15,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f431f064575046058c96fba48cbecf93 is 50, key is test_row_0/A:col10/1732554495625/Put/seqid=0
2024-11-25T17:08:15,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741919_1095 (size=12301)
2024-11-25T17:08:15,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554555638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554555640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554555641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554555642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554555743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554555745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:15,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:15,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:15,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554555748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:15,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554555748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:15,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-25T17:08:15,830 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-25T17:08:15,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:15,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-25T17:08:15,833 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:15,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-25T17:08:15,834 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:15,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:15,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-25T17:08:15,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:15,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554555948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:15,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:15,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554555953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:15,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:15,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554555962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:15,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:15,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554555973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:15,987 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:15,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-25T17:08:15,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:15,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:15,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:15,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:15,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:15,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f431f064575046058c96fba48cbecf93 2024-11-25T17:08:16,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8fb16db9525449aab5ac97f734652590 is 50, key is test_row_0/B:col10/1732554495625/Put/seqid=0 2024-11-25T17:08:16,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741920_1096 (size=12301) 2024-11-25T17:08:16,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8fb16db9525449aab5ac97f734652590 2024-11-25T17:08:16,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a508d004352d4ef092731f9649784762 is 50, key is test_row_0/C:col10/1732554495625/Put/seqid=0 2024-11-25T17:08:16,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741921_1097 (size=12301) 2024-11-25T17:08:16,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-25T17:08:16,141 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:16,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-25T17:08:16,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
as already flushing 2024-11-25T17:08:16,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554556252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554556255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554556266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554556275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,302 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:16,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-25T17:08:16,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:16,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
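The repeated RegionTooBusyException entries above come from HRegion.checkResources() refusing new mutations while the region's memstore is over its blocking limit, reported here as 512.0 K. In stock HBase that limit is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the small figure presumably reflects a deliberately low flush size in this test. As a rough illustration only (none of this code is part of the test run), the sketch below shows how a client could issue the same kind of Mutate against the TestAcidGuarantees table and back off when the region reports itself too busy; the connection setup, retry count, backoff values and cell value are assumptions, while the row key, column family and qualifier are taken from the log.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier mirror the keys seen in the log (test_row_0, A:col10).
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);
              break; // write accepted once the MemStoreFlusher has drained the region
            } catch (IOException busy) {
              // The server-side RegionTooBusyException above reaches the caller through the
              // client's retry machinery as an IOException; wait a bit and try again.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }

The growing callIds on the same connections (40198, 40206, 40222, 40230) in the entries above are consistent with the test's own writers retrying in this fashion.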
2024-11-25T17:08:16,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-25T17:08:16,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:16,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-25T17:08:16,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:16,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
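The pid=29 failures above are the region server declining FlushRegionCallable with "Unable to complete flush ... as already flushing" while the MemStoreFlusher-triggered flush of 140432b4069c8ca485d8f3971c9e31fe is still in flight; the master logs "Remote procedure failed, pid=29" and keeps re-dispatching the callable until that flush finishes. The table flush itself was requested through the admin API (see the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed" and "flush TestAcidGuarantees" entries earlier). A minimal sketch of issuing that request from a client, assuming a reachable cluster with default configuration and not taken from the test sources, looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master (the pid=28 entries above) and waits
          // for it to finish; per-region FlushRegionProcedure subprocedures (pid=29 here)
          // perform the actual memstore flushes on the region servers.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }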
2024-11-25T17:08:16,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a508d004352d4ef092731f9649784762 2024-11-25T17:08:16,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f431f064575046058c96fba48cbecf93 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f431f064575046058c96fba48cbecf93 2024-11-25T17:08:16,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f431f064575046058c96fba48cbecf93, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:08:16,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8fb16db9525449aab5ac97f734652590 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8fb16db9525449aab5ac97f734652590 2024-11-25T17:08:16,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8fb16db9525449aab5ac97f734652590, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:08:16,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/a508d004352d4ef092731f9649784762 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a508d004352d4ef092731f9649784762 2024-11-25T17:08:16,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a508d004352d4ef092731f9649784762, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:08:16,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 140432b4069c8ca485d8f3971c9e31fe in 903ms, sequenceid=395, compaction requested=true 2024-11-25T17:08:16,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:16,531 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:16,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-25T17:08:16,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:16,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:16,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:16,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:16,531 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:16,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:16,532 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:16,532 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:16,532 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
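Once the flush finishes at sequenceid=395, the stores A, B and C each hold four flushed files, so CompactSplit queues minor compactions and ExploringCompactionPolicy selects all four files per store. The "16 blocking" figure matches the stock hbase.hstore.blockingStoreFiles default, and four files is one above the usual minor-compaction minimum of three. The sketch below only names those selection knobs with their usual defaults as fallbacks; it is not configuration taken from this test, whose 512 K memstore limit is evidently non-default.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionKnobsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered
        // (the stores above qualify once the fourth flush file lands).
        int minFilesToCompact = conf.getInt("hbase.hstore.compactionThreshold", 3);
        // Upper bound on how many files one minor compaction may rewrite.
        int maxFilesPerCompaction = conf.getInt("hbase.hstore.compaction.max", 10);
        // Writers stall once a store accumulates this many files
        // (the "16 blocking" figure in the selection entries above).
        int blockingStoreFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.printf("min=%d max=%d blocking=%d%n",
            minFilesToCompact, maxFilesPerCompaction, blockingStoreFiles);
      }
    }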
2024-11-25T17:08:16,533 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7dda291fb8b04cb48c8aff53a4e136fc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/4e82485acd3049648a8875a980af18ea, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b55b0a92753c416d9f17c836660ed3d9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f431f064575046058c96fba48cbecf93] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=48.8 K 2024-11-25T17:08:16,533 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:16,533 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:16,533 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,533 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/dcc2e8d2a3c240259b747bd342a19b3f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f796016cb6ac40f9a13763dfb96764cf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/08f624db275644ce8af56427b524eb6d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8fb16db9525449aab5ac97f734652590] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=48.8 K 2024-11-25T17:08:16,533 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dda291fb8b04cb48c8aff53a4e136fc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554492083 2024-11-25T17:08:16,534 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e82485acd3049648a8875a980af18ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732554492720 2024-11-25T17:08:16,534 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b55b0a92753c416d9f17c836660ed3d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, 
earliestPutTs=1732554493862 2024-11-25T17:08:16,534 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f431f064575046058c96fba48cbecf93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554495621 2024-11-25T17:08:16,535 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting dcc2e8d2a3c240259b747bd342a19b3f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554492083 2024-11-25T17:08:16,535 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f796016cb6ac40f9a13763dfb96764cf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732554492720 2024-11-25T17:08:16,536 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 08f624db275644ce8af56427b524eb6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732554493862 2024-11-25T17:08:16,537 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fb16db9525449aab5ac97f734652590, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554495621 2024-11-25T17:08:16,547 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#83 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:16,548 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f998e8b36e70438c9c4771f8089431fa is 50, key is test_row_0/A:col10/1732554495625/Put/seqid=0 2024-11-25T17:08:16,555 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:16,556 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/b40daf592d474b6e8367d6087bacb5db is 50, key is test_row_0/B:col10/1732554495625/Put/seqid=0 2024-11-25T17:08:16,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741922_1098 (size=13255) 2024-11-25T17:08:16,575 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/f998e8b36e70438c9c4771f8089431fa as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f998e8b36e70438c9c4771f8089431fa 2024-11-25T17:08:16,582 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into f998e8b36e70438c9c4771f8089431fa(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:16,582 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:16,582 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=12, startTime=1732554496530; duration=0sec 2024-11-25T17:08:16,582 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:16,582 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:16,582 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:16,584 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:16,584 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:16,584 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:16,584 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3631ad12e3e24a1db37fbdb35ae78a47, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/93763932a9f144c0a1799133c90eadfe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/81cc5eadc36e4cd5a6f97beb8f8d3941, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a508d004352d4ef092731f9649784762] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=48.8 K 2024-11-25T17:08:16,585 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3631ad12e3e24a1db37fbdb35ae78a47, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554492083 2024-11-25T17:08:16,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741923_1099 (size=13255) 2024-11-25T17:08:16,586 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93763932a9f144c0a1799133c90eadfe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732554492720 2024-11-25T17:08:16,586 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81cc5eadc36e4cd5a6f97beb8f8d3941, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1732554493862 2024-11-25T17:08:16,587 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a508d004352d4ef092731f9649784762, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554495621 2024-11-25T17:08:16,596 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/b40daf592d474b6e8367d6087bacb5db as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b40daf592d474b6e8367d6087bacb5db 2024-11-25T17:08:16,608 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:16,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-25T17:08:16,609 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into b40daf592d474b6e8367d6087bacb5db(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:16,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:16,609 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:16,609 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=12, startTime=1732554496531; duration=0sec 2024-11-25T17:08:16,609 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:08:16,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:16,610 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:16,610 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:16,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:16,616 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#85 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:16,619 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/368987d4759c4d70878a6bd0c275fa0e is 50, key is test_row_0/C:col10/1732554495625/Put/seqid=0 2024-11-25T17:08:16,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/047cfc9b6b8e4c2db6a00a35a687bb5b is 50, key is test_row_0/A:col10/1732554495641/Put/seqid=0 2024-11-25T17:08:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741925_1101 (size=12301) 2024-11-25T17:08:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741924_1100 (size=13221) 2024-11-25T17:08:16,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:16,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:16,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554556795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554556796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554556797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554556798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554556899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554556902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554556903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:16,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554556903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:16,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-25T17:08:17,042 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/047cfc9b6b8e4c2db6a00a35a687bb5b 2024-11-25T17:08:17,055 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/368987d4759c4d70878a6bd0c275fa0e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/368987d4759c4d70878a6bd0c275fa0e 2024-11-25T17:08:17,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/1ea32e8cbd5749d4aef1d1a2345a9dbb is 50, key is test_row_0/B:col10/1732554495641/Put/seqid=0 2024-11-25T17:08:17,064 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into 368987d4759c4d70878a6bd0c275fa0e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:17,064 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:17,064 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=12, startTime=1732554496531; duration=0sec 2024-11-25T17:08:17,064 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:17,065 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:17,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741926_1102 (size=12301) 2024-11-25T17:08:17,088 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/1ea32e8cbd5749d4aef1d1a2345a9dbb 2024-11-25T17:08:17,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/de954067e5fb4023be303720f4604bff is 50, key is test_row_0/C:col10/1732554495641/Put/seqid=0 2024-11-25T17:08:17,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554557102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554557106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554557107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554557109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741927_1103 (size=12301) 2024-11-25T17:08:17,127 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/de954067e5fb4023be303720f4604bff 2024-11-25T17:08:17,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/047cfc9b6b8e4c2db6a00a35a687bb5b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/047cfc9b6b8e4c2db6a00a35a687bb5b 2024-11-25T17:08:17,140 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/047cfc9b6b8e4c2db6a00a35a687bb5b, entries=150, sequenceid=411, filesize=12.0 K 2024-11-25T17:08:17,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/1ea32e8cbd5749d4aef1d1a2345a9dbb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/1ea32e8cbd5749d4aef1d1a2345a9dbb 2024-11-25T17:08:17,149 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/1ea32e8cbd5749d4aef1d1a2345a9dbb, entries=150, sequenceid=411, filesize=12.0 K 2024-11-25T17:08:17,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/de954067e5fb4023be303720f4604bff as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/de954067e5fb4023be303720f4604bff 2024-11-25T17:08:17,161 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/de954067e5fb4023be303720f4604bff, entries=150, sequenceid=411, filesize=12.0 K 2024-11-25T17:08:17,162 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 140432b4069c8ca485d8f3971c9e31fe in 553ms, sequenceid=411, compaction requested=false 2024-11-25T17:08:17,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:17,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:17,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-25T17:08:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-25T17:08:17,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-25T17:08:17,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3290 sec 2024-11-25T17:08:17,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.3360 sec 2024-11-25T17:08:17,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:17,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-25T17:08:17,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:17,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:17,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:17,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:17,414 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:17,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:17,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554557423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554557423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554557425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554557426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/2479314c799741a7924495ad0dd1aa03 is 50, key is test_row_0/A:col10/1732554496795/Put/seqid=0 2024-11-25T17:08:17,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741928_1104 (size=12301) 2024-11-25T17:08:17,462 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/2479314c799741a7924495ad0dd1aa03 2024-11-25T17:08:17,483 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/6233b61006554586a94e9fde1fe768a7 is 50, key is test_row_0/B:col10/1732554496795/Put/seqid=0 2024-11-25T17:08:17,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741929_1105 (size=12301) 2024-11-25T17:08:17,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554557527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554557528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554557531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554557538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554557733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554557733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554557734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:17,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554557741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:17,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/6233b61006554586a94e9fde1fe768a7 2024-11-25T17:08:17,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/13db501333a24c9e9de8d5f82deecba5 is 50, key is test_row_0/C:col10/1732554496795/Put/seqid=0 2024-11-25T17:08:17,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-25T17:08:17,939 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-25T17:08:17,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741930_1106 (size=12301) 2024-11-25T17:08:17,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:17,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/13db501333a24c9e9de8d5f82deecba5 2024-11-25T17:08:17,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-25T17:08:17,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 
2024-11-25T17:08:17,947 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:17,948 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:17,948 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:17,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/2479314c799741a7924495ad0dd1aa03 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2479314c799741a7924495ad0dd1aa03 2024-11-25T17:08:17,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2479314c799741a7924495ad0dd1aa03, entries=150, sequenceid=437, filesize=12.0 K 2024-11-25T17:08:17,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/6233b61006554586a94e9fde1fe768a7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/6233b61006554586a94e9fde1fe768a7 2024-11-25T17:08:17,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/6233b61006554586a94e9fde1fe768a7, entries=150, sequenceid=437, filesize=12.0 K 2024-11-25T17:08:17,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/13db501333a24c9e9de8d5f82deecba5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/13db501333a24c9e9de8d5f82deecba5 2024-11-25T17:08:17,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/13db501333a24c9e9de8d5f82deecba5, entries=150, sequenceid=437, filesize=12.0 K 2024-11-25T17:08:17,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 140432b4069c8ca485d8f3971c9e31fe in 559ms, sequenceid=437, compaction requested=true 2024-11-25T17:08:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:17,971 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:17,971 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:17,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:17,972 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:17,972 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:17,972 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:17,973 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f998e8b36e70438c9c4771f8089431fa, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/047cfc9b6b8e4c2db6a00a35a687bb5b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2479314c799741a7924495ad0dd1aa03] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=37.0 K 2024-11-25T17:08:17,973 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:17,973 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:17,973 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:17,973 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b40daf592d474b6e8367d6087bacb5db, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/1ea32e8cbd5749d4aef1d1a2345a9dbb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/6233b61006554586a94e9fde1fe768a7] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=37.0 K 2024-11-25T17:08:17,974 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f998e8b36e70438c9c4771f8089431fa, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554495621 2024-11-25T17:08:17,974 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 047cfc9b6b8e4c2db6a00a35a687bb5b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554495631 2024-11-25T17:08:17,974 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b40daf592d474b6e8367d6087bacb5db, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554495621 2024-11-25T17:08:17,975 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2479314c799741a7924495ad0dd1aa03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732554496793 2024-11-25T17:08:17,975 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 1ea32e8cbd5749d4aef1d1a2345a9dbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554495631 2024-11-25T17:08:17,976 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6233b61006554586a94e9fde1fe768a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732554496793 2024-11-25T17:08:17,989 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#92 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:17,990 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/bf8e92e79d10434ca6b995cbcb3b646c is 50, key is test_row_0/A:col10/1732554496795/Put/seqid=0 2024-11-25T17:08:17,992 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#93 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:17,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8f93aca8b0384eeabb6b83a65007677f is 50, key is test_row_0/B:col10/1732554496795/Put/seqid=0 2024-11-25T17:08:17,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741931_1107 (size=13357) 2024-11-25T17:08:18,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741932_1108 (size=13357) 2024-11-25T17:08:18,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:18,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:08:18,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:18,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:18,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:18,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:18,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:18,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:18,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-25T17:08:18,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/648c0e7bd0ab4c27bef0d4fe94829c8c is 50, key is test_row_0/A:col10/1732554498036/Put/seqid=0 2024-11-25T17:08:18,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741933_1109 (size=14741) 2024-11-25T17:08:18,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554558068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554558070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554558071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554558071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:18,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-25T17:08:18,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:18,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554558173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554558173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554558175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554558175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-25T17:08:18,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:18,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-25T17:08:18,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:18,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554558376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554558377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554558378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554558382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:18,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-25T17:08:18,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:18,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:18,411 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/bf8e92e79d10434ca6b995cbcb3b646c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bf8e92e79d10434ca6b995cbcb3b646c
2024-11-25T17:08:18,425 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into bf8e92e79d10434ca6b995cbcb3b646c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T17:08:18,425 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:18,425 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=13, startTime=1732554497971; duration=0sec
2024-11-25T17:08:18,426 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-25T17:08:18,426 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A
2024-11-25T17:08:18,426 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T17:08:18,429 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/8f93aca8b0384eeabb6b83a65007677f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8f93aca8b0384eeabb6b83a65007677f
2024-11-25T17:08:18,434 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T17:08:18,434 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files)
2024-11-25T17:08:18,434 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:18,435 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/368987d4759c4d70878a6bd0c275fa0e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/de954067e5fb4023be303720f4604bff, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/13db501333a24c9e9de8d5f82deecba5] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=36.9 K
2024-11-25T17:08:18,435 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 368987d4759c4d70878a6bd0c275fa0e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554495621
2024-11-25T17:08:18,436 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting de954067e5fb4023be303720f4604bff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554495631
2024-11-25T17:08:18,437 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13db501333a24c9e9de8d5f82deecba5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732554496793
2024-11-25T17:08:18,440 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into 8f93aca8b0384eeabb6b83a65007677f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T17:08:18,440 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:18,440 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=13, startTime=1732554497971; duration=0sec
2024-11-25T17:08:18,440 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T17:08:18,440 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B
2024-11-25T17:08:18,451 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#95 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T17:08:18,452 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/c42dc8decc904c58b932dd7873c54c91 is 50, key is test_row_0/C:col10/1732554496795/Put/seqid=0
2024-11-25T17:08:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741934_1110 (size=13323)
2024-11-25T17:08:18,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/648c0e7bd0ab4c27bef0d4fe94829c8c
2024-11-25T17:08:18,470 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/c42dc8decc904c58b932dd7873c54c91 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c42dc8decc904c58b932dd7873c54c91
2024-11-25T17:08:18,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ede2ae7d1cfb48bda89c89e98297be29 is 50, key is test_row_0/B:col10/1732554498036/Put/seqid=0
2024-11-25T17:08:18,480 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into c42dc8decc904c58b932dd7873c54c91(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T17:08:18,480 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:18,480 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=13, startTime=1732554497971; duration=0sec 2024-11-25T17:08:18,480 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:18,480 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:18,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741935_1111 (size=12301) 2024-11-25T17:08:18,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-25T17:08:18,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:18,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-25T17:08:18,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:18,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:18,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554558679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554558680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554558681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:18,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554558688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:18,716 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:18,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-25T17:08:18,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:18,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,870 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:18,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-25T17:08:18,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:18,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:18,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:18,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:18,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ede2ae7d1cfb48bda89c89e98297be29
2024-11-25T17:08:18,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/39d74f7ac2b84f2cb61c9c174fa2f907 is 50, key is test_row_0/C:col10/1732554498036/Put/seqid=0
2024-11-25T17:08:18,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741936_1112 (size=12301)
2024-11-25T17:08:18,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/39d74f7ac2b84f2cb61c9c174fa2f907
2024-11-25T17:08:18,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/648c0e7bd0ab4c27bef0d4fe94829c8c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/648c0e7bd0ab4c27bef0d4fe94829c8c
2024-11-25T17:08:18,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/648c0e7bd0ab4c27bef0d4fe94829c8c, entries=200, sequenceid=449, filesize=14.4 K
2024-11-25T17:08:18,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/ede2ae7d1cfb48bda89c89e98297be29 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ede2ae7d1cfb48bda89c89e98297be29
2024-11-25T17:08:18,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ede2ae7d1cfb48bda89c89e98297be29, entries=150, sequenceid=449, filesize=12.0 K
2024-11-25T17:08:18,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/39d74f7ac2b84f2cb61c9c174fa2f907 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/39d74f7ac2b84f2cb61c9c174fa2f907
2024-11-25T17:08:18,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/39d74f7ac2b84f2cb61c9c174fa2f907, entries=150, sequenceid=449, filesize=12.0 K
2024-11-25T17:08:18,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 140432b4069c8ca485d8f3971c9e31fe in 895ms, sequenceid=449, compaction requested=false
2024-11-25T17:08:18,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe:
2024-11-25T17:08:19,023 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:19,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31
2024-11-25T17:08:19,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.
2024-11-25T17:08:19,024 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-11-25T17:08:19,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A
2024-11-25T17:08:19,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:19,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B
2024-11-25T17:08:19,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:19,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C
2024-11-25T17:08:19,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:19,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/08cf0f4b25db4cc894298617971f7c41 is 50, key is test_row_0/A:col10/1732554498063/Put/seqid=0
2024-11-25T17:08:19,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741937_1113 (size=12301)
2024-11-25T17:08:19,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30
2024-11-25T17:08:19,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe
2024-11-25T17:08:19,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing
2024-11-25T17:08:19,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:19,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554559194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:19,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554559195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554559195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554559196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554559298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554559299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554559300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,437 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/08cf0f4b25db4cc894298617971f7c41 2024-11-25T17:08:19,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/fb22abdc8bd248219e94910def1e370d is 50, key is test_row_0/B:col10/1732554498063/Put/seqid=0 2024-11-25T17:08:19,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741938_1114 (size=12301) 2024-11-25T17:08:19,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554559500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554559501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554559502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554559805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554559805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:19,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554559806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:19,852 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/fb22abdc8bd248219e94910def1e370d 2024-11-25T17:08:19,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/963b32787f38412fa0c6ade3fd8e8bd4 is 50, key is test_row_0/C:col10/1732554498063/Put/seqid=0 2024-11-25T17:08:19,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741939_1115 (size=12301) 2024-11-25T17:08:19,899 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/963b32787f38412fa0c6ade3fd8e8bd4 2024-11-25T17:08:19,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/08cf0f4b25db4cc894298617971f7c41 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/08cf0f4b25db4cc894298617971f7c41 2024-11-25T17:08:19,910 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/08cf0f4b25db4cc894298617971f7c41, entries=150, sequenceid=476, filesize=12.0 K 2024-11-25T17:08:19,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/fb22abdc8bd248219e94910def1e370d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/fb22abdc8bd248219e94910def1e370d 2024-11-25T17:08:19,916 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/fb22abdc8bd248219e94910def1e370d, entries=150, sequenceid=476, filesize=12.0 K 2024-11-25T17:08:19,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/963b32787f38412fa0c6ade3fd8e8bd4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/963b32787f38412fa0c6ade3fd8e8bd4 2024-11-25T17:08:19,924 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/963b32787f38412fa0c6ade3fd8e8bd4, entries=150, sequenceid=476, filesize=12.0 K 2024-11-25T17:08:19,925 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 140432b4069c8ca485d8f3971c9e31fe in 901ms, sequenceid=476, compaction requested=true 2024-11-25T17:08:19,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:19,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
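The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once the region's memstore passes its blocking threshold; the flush that just finished (~140.89 KB in 901ms at sequenceid=476) is what eventually brings the region back under it. As a rough illustration only: the threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, and the 512.0 K limit in this run suggests the test lowers the flush size far below the 128 MB default. The values in the sketch below are hypothetical, chosen only to reproduce a 512 KB threshold; they are not taken from the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the two settings whose product is the memstore blocking threshold
// enforced by HRegion.checkResources() ("Over memstore limit=..." above).
// Hypothetical values; not taken from the test's actual configuration.
public final class MemstoreBlockingSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
    // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" in this log.
    return conf;
  }
}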
2024-11-25T17:08:19,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-25T17:08:19,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-25T17:08:19,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-25T17:08:19,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9790 sec 2024-11-25T17:08:19,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.9840 sec 2024-11-25T17:08:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-25T17:08:20,051 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-25T17:08:20,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:20,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-25T17:08:20,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-25T17:08:20,054 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:20,055 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:20,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:20,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-25T17:08:20,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:20,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-25T17:08:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, 
store=B 2024-11-25T17:08:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:20,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:20,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:20,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:20,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
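The pid=32/33 flush was requested by the test client immediately after pid=30 completed ("Client=jenkins//172.17.0.3 flush TestAcidGuarantees" above); the region server rejects this first dispatch of pid=33 because the MemStoreFlusher is already flushing the region, and the master re-dispatches the callable until it can run. A minimal sketch of the client-side call behind those entries, assuming a plain Connection/Admin setup rather than the test's own utilities:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: issue the table flush that appears as FlushTableProcedure
// (pid=30, then pid=32) in the master log. Not the test's actual code.
public final class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The admin waits on the master-side procedure, which is why the log
      // keeps polling "Checking to see if procedure is done pid=...".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}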
2024-11-25T17:08:20,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a0e5ca3aa0b244159f84c81085e96b02 is 50, key is test_row_0/A:col10/1732554500200/Put/seqid=0 2024-11-25T17:08:20,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:20,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554560268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:20,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741940_1116 (size=14741) 2024-11-25T17:08:20,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:20,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554560309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:20,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:20,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:20,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554560318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:20,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554560318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:20,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-25T17:08:20,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:20,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:20,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:20,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:20,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554560370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:20,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:20,523 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:20,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:20,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:20,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:20,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:20,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554560574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:20,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-25T17:08:20,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:20,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:20,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:20,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
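The "Over memstore limit=512.0 K" rejection above comes from HRegion.checkResources(): once a region's memstore exceeds its blocking limit, incoming mutations are refused with RegionTooBusyException until the in-flight flush frees memory. In stock HBase that blocking limit is the configured memstore flush size multiplied by the block multiplier; the unusually small 512 K figure here presumably reflects a deliberately reduced flush size in this test's configuration rather than production defaults. The snippet below is a hedged sketch of how the threshold is derived from the standard configuration keys; the concrete values are assumptions, not read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: derive the per-region memstore blocking threshold that produces
    // the RegionTooBusyException "Over memstore limit=..." messages above.
    // Both property names are standard HBase keys; the small flush size is an
    // assumed test-style override, not taken from this run's site configuration.
    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed override (default is 128 MB)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;                   // 512 KB with the values above
        System.out.println("writes block above " + blockingLimit + " bytes per region");
      }
    }

Once the flush visible later in this section completes and the memstore drops back under that limit, the blocked Mutate calls (callId 261, 263, ... in the CallRunner lines) can be retried.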
2024-11-25T17:08:20,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a0e5ca3aa0b244159f84c81085e96b02 2024-11-25T17:08:20,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/21b07d7e9f7e439abce9b2c10af6d3b8 is 50, key is test_row_0/B:col10/1732554500200/Put/seqid=0 2024-11-25T17:08:20,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741941_1117 (size=12301) 2024-11-25T17:08:20,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:20,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:20,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:20,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:20,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:20,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554560877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:20,910 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03883f7b to 127.0.0.1:56265 2024-11-25T17:08:20,910 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b5cad1a to 127.0.0.1:56265 2024-11-25T17:08:20,910 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:20,911 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:56265 2024-11-25T17:08:20,911 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:20,911 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:20,916 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x767a8485 to 127.0.0.1:56265 2024-11-25T17:08:20,916 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:20,982 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:20,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:20,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:20,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:20,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:20,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
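Interleaved with the flush procedure retries, the client write path keeps getting pushed back: the Mutate calls rejected above (callId 261 and 263 in the CallRunner lines, both against region 140432b4069c8ca485d8f3971c9e31fe) fail with RegionTooBusyException until the memstore drains. RegionTooBusyException is a retriable IOException and the stock HBase client backs off and retries it on its own; the loop below is only an illustrative manual retry-with-backoff sketch for code issuing raw puts. The row key, family and qualifier are taken from the log; the class and method names are invented for the example.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch only: retry a put with backoff when the region reports
    // it is over its memstore limit. The normal client already retries this internally.
    public class BusyRegionRetrySketch {
      static void putWithBackoff(Connection conn, Put put) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);                 // wait for the in-flight flush to drain the memstore
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
          throw new RuntimeException("region stayed too busy after 10 attempts");
        }
      }

      public static void main(String[] args) {
        // Example payload shaped like the rows in this test (names taken from the log):
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        // putWithBackoff(connection, put) would be invoked with a live Connection.
      }
    }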
2024-11-25T17:08:20,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:21,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/21b07d7e9f7e439abce9b2c10af6d3b8 2024-11-25T17:08:21,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9e1821d4d5ca4bb9b7571f14f12c9769 is 50, key is test_row_0/C:col10/1732554500200/Put/seqid=0 2024-11-25T17:08:21,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741942_1118 (size=12301) 2024-11-25T17:08:21,136 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:21,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:21,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:21,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:21,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:21,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:21,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-25T17:08:21,288 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:21,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:21,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:21,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:21,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:21,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:21,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40206 deadline: 1732554561322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:21,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:21,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40198 deadline: 1732554561328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:21,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:21,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40222 deadline: 1732554561329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:21,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:21,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40230 deadline: 1732554561386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:21,443 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:21,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:21,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:21,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. as already flushing 2024-11-25T17:08:21,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:21,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:21,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9e1821d4d5ca4bb9b7571f14f12c9769 2024-11-25T17:08:21,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a0e5ca3aa0b244159f84c81085e96b02 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a0e5ca3aa0b244159f84c81085e96b02 2024-11-25T17:08:21,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a0e5ca3aa0b244159f84c81085e96b02, entries=200, sequenceid=489, filesize=14.4 K 2024-11-25T17:08:21,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/21b07d7e9f7e439abce9b2c10af6d3b8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21b07d7e9f7e439abce9b2c10af6d3b8 2024-11-25T17:08:21,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21b07d7e9f7e439abce9b2c10af6d3b8, entries=150, sequenceid=489, filesize=12.0 K 2024-11-25T17:08:21,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/9e1821d4d5ca4bb9b7571f14f12c9769 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9e1821d4d5ca4bb9b7571f14f12c9769 2024-11-25T17:08:21,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9e1821d4d5ca4bb9b7571f14f12c9769, entries=150, sequenceid=489, filesize=12.0 K 2024-11-25T17:08:21,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 140432b4069c8ca485d8f3971c9e31fe in 1354ms, sequenceid=489, compaction requested=true 2024-11-25T17:08:21,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:21,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:21,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:21,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:21,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:21,556 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:21,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 140432b4069c8ca485d8f3971c9e31fe:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:21,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:21,556 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:21,557 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55140 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:21,557 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:21,557 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/B is initiating minor compaction (all files) 2024-11-25T17:08:21,557 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/A is initiating minor compaction (all files) 2024-11-25T17:08:21,557 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/B in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:21,557 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/A in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:21,557 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8f93aca8b0384eeabb6b83a65007677f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ede2ae7d1cfb48bda89c89e98297be29, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/fb22abdc8bd248219e94910def1e370d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21b07d7e9f7e439abce9b2c10af6d3b8] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=49.1 K 2024-11-25T17:08:21,557 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bf8e92e79d10434ca6b995cbcb3b646c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/648c0e7bd0ab4c27bef0d4fe94829c8c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/08cf0f4b25db4cc894298617971f7c41, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a0e5ca3aa0b244159f84c81085e96b02] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=53.8 K 2024-11-25T17:08:21,558 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf8e92e79d10434ca6b995cbcb3b646c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732554496793 2024-11-25T17:08:21,558 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f93aca8b0384eeabb6b83a65007677f, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732554496793 2024-11-25T17:08:21,558 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 648c0e7bd0ab4c27bef0d4fe94829c8c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732554497420 2024-11-25T17:08:21,558 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ede2ae7d1cfb48bda89c89e98297be29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732554497420 2024-11-25T17:08:21,559 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08cf0f4b25db4cc894298617971f7c41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1732554498062 2024-11-25T17:08:21,559 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 
fb22abdc8bd248219e94910def1e370d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1732554498062 2024-11-25T17:08:21,559 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0e5ca3aa0b244159f84c81085e96b02, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1732554499191 2024-11-25T17:08:21,559 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 21b07d7e9f7e439abce9b2c10af6d3b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1732554499191 2024-11-25T17:08:21,569 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#A#compaction#104 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:21,569 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#B#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:21,570 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a55dbc19465247cdaa84f327a4c7a6f2 is 50, key is test_row_0/A:col10/1732554500200/Put/seqid=0 2024-11-25T17:08:21,570 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/7b92d10a0a274e1ea804c720ea2f3260 is 50, key is test_row_0/B:col10/1732554500200/Put/seqid=0 2024-11-25T17:08:21,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741943_1119 (size=13493) 2024-11-25T17:08:21,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741944_1120 (size=13493) 2024-11-25T17:08:21,596 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:21,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-25T17:08:21,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
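The throttle.PressureAwareThroughputController lines just above report, for each compaction, its average write throughput and how often it had to sleep to stay under the configured limit (50.00 MB/second here, shared among the active operations). The class below is a rough, self-contained model of that style of limiter, written for illustration only: each active operation gets an equal share of the total budget and sleeps when it has written ahead of that pace. The real HBase controller is more elaborate (it also adapts the limit to store pressure, as its name suggests), so treat this as a sketch of the idea rather than its implementation.

// Rough model of a shared write-throughput limiter. Illustrative only;
// not the HBase PressureAwareThroughputController.
public class SimpleThroughputLimiter {
    private final double totalBytesPerSecond;
    private int activeOperations = 0;

    public SimpleThroughputLimiter(double totalBytesPerSecond) {
        this.totalBytesPerSecond = totalBytesPerSecond; // e.g. 50 MB/s as in the log
    }

    public synchronized void start()  { activeOperations++; }
    public synchronized void finish() { activeOperations--; }

    // Called periodically by an operation with the bytes it has written so far
    // and the elapsed time; sleeps if the operation is ahead of its allowance.
    public void control(long bytesWritten, long elapsedNanos) throws InterruptedException {
        double perOperationLimit;
        synchronized (this) {
            perOperationLimit = totalBytesPerSecond / Math.max(1, activeOperations);
        }
        double expectedSeconds = bytesWritten / perOperationLimit; // time this much data "should" take
        double actualSeconds = elapsedNanos / 1e9;
        long sleepMillis = (long) ((expectedSeconds - actualSeconds) * 1000);
        if (sleepMillis > 0) {
            Thread.sleep(sleepMillis); // "slept 0 time(s)" in the log means this branch never fired
        }
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024); // 50 MB/s
        limiter.start();
        limiter.control(10 * 1024 * 1024, 1_000_000_000L); // 10 MB written in 1s: under budget, no sleep
        limiter.finish();
    }
}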
2024-11-25T17:08:21,597 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-25T17:08:21,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:21,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:21,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:21,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:21,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:21,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:21,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d0165a66fc0649e3b61a42c415d446c5 is 50, key is test_row_0/A:col10/1732554500263/Put/seqid=0 2024-11-25T17:08:21,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741945_1121 (size=12301) 2024-11-25T17:08:21,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:21,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
as already flushing 2024-11-25T17:08:21,718 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x736f1673 to 127.0.0.1:56265 2024-11-25T17:08:21,718 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:21,981 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/7b92d10a0a274e1ea804c720ea2f3260 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/7b92d10a0a274e1ea804c720ea2f3260 2024-11-25T17:08:21,981 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/a55dbc19465247cdaa84f327a4c7a6f2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a55dbc19465247cdaa84f327a4c7a6f2 2024-11-25T17:08:21,988 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/B of 140432b4069c8ca485d8f3971c9e31fe into 7b92d10a0a274e1ea804c720ea2f3260(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:21,988 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/A of 140432b4069c8ca485d8f3971c9e31fe into a55dbc19465247cdaa84f327a4c7a6f2(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
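The HRegionFileSystem(442) "Committing ... .tmp/... as ..." lines above show the commit step of both compactions: the new HFile is written under the region's .tmp directory and only then moved into the column-family directory, so concurrent readers never observe a partially written file. Below is a minimal sketch of that write-then-rename pattern using the Hadoop FileSystem API; the class name and paths are placeholders, and the real HRegionFileSystem performs additional validation and bookkeeping around the rename.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the ".tmp then rename" commit visible in the log. Illustrative only.
public class TmpCommitSketch {

    // Move a finished temporary file into its final store directory.
    // On HDFS the rename is atomic, so readers see either the old file set or the new one.
    public static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path destination = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, destination)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + destination);
        }
        return destination;
    }

    public static void main(String[] args) throws IOException {
        // Usage: TmpCommitSketch <tmp-file-path> <store-directory-path>
        FileSystem fs = FileSystem.get(new Configuration());
        System.out.println("Committed to " + commit(fs, new Path(args[0]), new Path(args[1])));
    }
}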
2024-11-25T17:08:21,988 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:21,988 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:21,988 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/B, priority=12, startTime=1732554501556; duration=0sec 2024-11-25T17:08:21,988 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/A, priority=12, startTime=1732554501556; duration=0sec 2024-11-25T17:08:21,988 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:21,988 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:B 2024-11-25T17:08:21,988 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:21,988 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:A 2024-11-25T17:08:21,988 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:21,991 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:21,991 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 140432b4069c8ca485d8f3971c9e31fe/C is initiating minor compaction (all files) 2024-11-25T17:08:21,991 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 140432b4069c8ca485d8f3971c9e31fe/C in TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
2024-11-25T17:08:21,991 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c42dc8decc904c58b932dd7873c54c91, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/39d74f7ac2b84f2cb61c9c174fa2f907, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/963b32787f38412fa0c6ade3fd8e8bd4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9e1821d4d5ca4bb9b7571f14f12c9769] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp, totalSize=49.0 K 2024-11-25T17:08:21,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c42dc8decc904c58b932dd7873c54c91, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=437, earliestPutTs=1732554496793 2024-11-25T17:08:21,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 39d74f7ac2b84f2cb61c9c174fa2f907, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732554497420 2024-11-25T17:08:21,993 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 963b32787f38412fa0c6ade3fd8e8bd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1732554498062 2024-11-25T17:08:21,993 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e1821d4d5ca4bb9b7571f14f12c9769, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1732554499191 2024-11-25T17:08:22,004 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 140432b4069c8ca485d8f3971c9e31fe#C#compaction#107 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:22,004 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/db0cae705db744dbb3cf6a1aaa6e9674 is 50, key is test_row_0/C:col10/1732554500200/Put/seqid=0 2024-11-25T17:08:22,006 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d0165a66fc0649e3b61a42c415d446c5 2024-11-25T17:08:22,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741946_1122 (size=13459) 2024-11-25T17:08:22,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/e0ad748dd5b8436b9b091240bd4c711a is 50, key is test_row_0/B:col10/1732554500263/Put/seqid=0 2024-11-25T17:08:22,017 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/db0cae705db744dbb3cf6a1aaa6e9674 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/db0cae705db744dbb3cf6a1aaa6e9674 2024-11-25T17:08:22,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741947_1123 (size=12301) 2024-11-25T17:08:22,030 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 140432b4069c8ca485d8f3971c9e31fe/C of 140432b4069c8ca485d8f3971c9e31fe into db0cae705db744dbb3cf6a1aaa6e9674(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
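The flush entries in this stretch report sizes both as binary-prefixed values and raw bytes (for example "134.18 KB/137400" earlier), and the per-family "Flushed memstore data size=44.73 KB" entries (one each for A, B and C; the first just above, the other two below) add back up to the 134.18 KB announced for the region-level flush: 137400 bytes over three families is 45800 bytes, about 44.73 KB, each. A tiny check of that arithmetic; the class name is made up for illustration.

// Quick consistency check of the flush sizes reported in the log:
// 137400 bytes for the region, spread evenly over three column families.
public class FlushSizeCheck {
    public static void main(String[] args) {
        long regionBytes = 137_400L;           // the "134.18 KB/137400" figure
        long perFamilyBytes = regionBytes / 3; // families A, B and C carry equal loads in this test
        System.out.printf("region    : %.2f KB%n", regionBytes / 1024.0);    // 134.18 KB
        System.out.printf("per family: %.2f KB%n", perFamilyBytes / 1024.0); // 44.73 KB, matching the log
    }
}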
2024-11-25T17:08:22,030 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:22,030 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe., storeName=140432b4069c8ca485d8f3971c9e31fe/C, priority=12, startTime=1732554501556; duration=0sec 2024-11-25T17:08:22,030 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:22,030 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 140432b4069c8ca485d8f3971c9e31fe:C 2024-11-25T17:08:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-25T17:08:22,390 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d38d10 to 127.0.0.1:56265 2024-11-25T17:08:22,390 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:22,426 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/e0ad748dd5b8436b9b091240bd4c711a 2024-11-25T17:08:22,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cc033358e9914faa98c3a5825bb278df is 50, key is test_row_0/C:col10/1732554500263/Put/seqid=0 2024-11-25T17:08:22,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741948_1124 (size=12301) 2024-11-25T17:08:22,623 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
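The recurring "Checking to see if procedure is done pid=32" lines are the admin client polling the master for the flush procedure it submitted; pid=32 is reported below as a FlushTableProcedure for TestAcidGuarantees, and the client logs "Operation: FLUSH ... procId: 32 completed" once it finishes. From the application side that whole exchange is a single Admin call. A minimal sketch, assuming the target cluster's hbase-site.xml is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call behind a flush-table procedure like pid=32.
public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Returns once the master reports the flush complete, which is what
            // the repeated "Checking to see if procedure is done" polling reflects.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}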
2024-11-25T17:08:22,838 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=512 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cc033358e9914faa98c3a5825bb278df 2024-11-25T17:08:22,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/d0165a66fc0649e3b61a42c415d446c5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d0165a66fc0649e3b61a42c415d446c5 2024-11-25T17:08:22,848 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d0165a66fc0649e3b61a42c415d446c5, entries=150, sequenceid=512, filesize=12.0 K 2024-11-25T17:08:22,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/e0ad748dd5b8436b9b091240bd4c711a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/e0ad748dd5b8436b9b091240bd4c711a 2024-11-25T17:08:22,853 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/e0ad748dd5b8436b9b091240bd4c711a, entries=150, sequenceid=512, filesize=12.0 K 2024-11-25T17:08:22,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/cc033358e9914faa98c3a5825bb278df as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cc033358e9914faa98c3a5825bb278df 2024-11-25T17:08:22,858 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cc033358e9914faa98c3a5825bb278df, entries=150, sequenceid=512, filesize=12.0 K 2024-11-25T17:08:22,859 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=13.42 KB/13740 for 140432b4069c8ca485d8f3971c9e31fe in 1262ms, sequenceid=512, 
compaction requested=false 2024-11-25T17:08:22,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:22,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:22,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-25T17:08:22,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-25T17:08:22,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-25T17:08:22,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8050 sec 2024-11-25T17:08:22,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 2.8090 sec 2024-11-25T17:08:23,328 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:56265 2024-11-25T17:08:23,328 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:23,340 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:56265 2024-11-25T17:08:23,340 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:23,345 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62c08aa2 to 127.0.0.1:56265 2024-11-25T17:08:23,345 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:24,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-25T17:08:24,161 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 94
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 111
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 92
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 100
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5180
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5116
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2261
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6779 rows
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2261
2024-11-25T17:08:24,161 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6779 rows
2024-11-25T17:08:24,161 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-25T17:08:24,162 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24869052 to 127.0.0.1:56265
2024-11-25T17:08:24,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-25T17:08:24,166 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-25T17:08:24,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees
2024-11-25T17:08:24,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-25T17:08:24,177 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554504176"}]},"ts":"1732554504176"}
2024-11-25T17:08:24,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34
2024-11-25T17:08:24,178 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-25T17:08:24,180 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-25T17:08:24,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-25T17:08:24,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=140432b4069c8ca485d8f3971c9e31fe, UNASSIGN}]
2024-11-25T17:08:24,186 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure
table=TestAcidGuarantees, region=140432b4069c8ca485d8f3971c9e31fe, UNASSIGN 2024-11-25T17:08:24,186 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=140432b4069c8ca485d8f3971c9e31fe, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:24,187 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:08:24,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:08:24,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-25T17:08:24,342 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:24,343 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:24,344 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:08:24,344 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 140432b4069c8ca485d8f3971c9e31fe, disabling compactions & flushes 2024-11-25T17:08:24,344 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:24,344 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 2024-11-25T17:08:24,344 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. after waiting 0 ms 2024-11-25T17:08:24,344 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
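The disable requested by "Client=jenkins//172.17.0.3 disable TestAcidGuarantees" fans out into the procedure chain above: DisableTableProcedure (pid=34) schedules CloseTableRegionsProcedure (pid=35), which unassigns the region through TransitRegionStateProcedure (pid=36) and CloseRegionProcedure (pid=37); the region server then flushes the remaining edits and archives the compacted-away store files, as the following lines show. On the client this is again a single Admin call; a minimal sketch under the same classpath assumption as before:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call that produces the disable/unassign/close
// procedure chain in the log (pids 34 through 37).
public class DisableTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.isTableEnabled(table)) {
                // The region is flushed and closed (and, as the surrounding log shows,
                // its obsolete store files archived) before the table becomes DISABLED.
                admin.disableTable(table);
            }
        }
    }
}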
2024-11-25T17:08:24,345 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing 140432b4069c8ca485d8f3971c9e31fe 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-25T17:08:24,345 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=A 2024-11-25T17:08:24,345 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:24,345 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=B 2024-11-25T17:08:24,345 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:24,345 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 140432b4069c8ca485d8f3971c9e31fe, store=C 2024-11-25T17:08:24,345 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:24,349 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/550dec81b4e24943968af2d3533de4de is 50, key is test_row_0/A:col10/1732554502389/Put/seqid=0 2024-11-25T17:08:24,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741949_1125 (size=12301) 2024-11-25T17:08:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-25T17:08:24,754 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/550dec81b4e24943968af2d3533de4de 2024-11-25T17:08:24,761 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/df5297d688bd4935a3541a90b4db4f5a is 50, key is test_row_0/B:col10/1732554502389/Put/seqid=0 2024-11-25T17:08:24,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741950_1126 (size=12301) 2024-11-25T17:08:24,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-25T17:08:25,166 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/df5297d688bd4935a3541a90b4db4f5a 2024-11-25T17:08:25,173 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/4dfbfe5597a4448b8233ca37f6af14cd is 50, key is test_row_0/C:col10/1732554502389/Put/seqid=0 2024-11-25T17:08:25,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741951_1127 (size=12301) 2024-11-25T17:08:25,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-25T17:08:25,578 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/4dfbfe5597a4448b8233ca37f6af14cd 2024-11-25T17:08:25,583 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/A/550dec81b4e24943968af2d3533de4de as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/550dec81b4e24943968af2d3533de4de 2024-11-25T17:08:25,586 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/550dec81b4e24943968af2d3533de4de, entries=150, sequenceid=523, filesize=12.0 K 2024-11-25T17:08:25,587 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/B/df5297d688bd4935a3541a90b4db4f5a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/df5297d688bd4935a3541a90b4db4f5a 2024-11-25T17:08:25,591 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/df5297d688bd4935a3541a90b4db4f5a, entries=150, sequenceid=523, filesize=12.0 K 2024-11-25T17:08:25,592 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/.tmp/C/4dfbfe5597a4448b8233ca37f6af14cd as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4dfbfe5597a4448b8233ca37f6af14cd 2024-11-25T17:08:25,596 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4dfbfe5597a4448b8233ca37f6af14cd, entries=150, sequenceid=523, filesize=12.0 K 2024-11-25T17:08:25,597 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 140432b4069c8ca485d8f3971c9e31fe in 1253ms, sequenceid=523, compaction requested=true 2024-11-25T17:08:25,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a216b78599644a03a713e555a0667e4f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8be079adbcd749ee8a569dfcbcabee53, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7282eb3bf6ec4351a244666fed03663c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/e4cb729e85e94f0583e5757ae0f1f8a9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d181c0fc5da14b109679fe217291dca5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1d94363949f24a0d84ed2e7a367e8bde, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/6709bdb2f1964b5d8cb472f27264bdeb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5d15fe563ccb4aa6a70288fd09f07933, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/576edb3098184db88c3fad5ebd0cd1f9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a25964cadf1245edac6f6b69475a8519, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1a5b06d3c41944b1b752c98c501218ac, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/205708f2209f458cb0dc65cbc93934f4, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/436bb8f8da774b64a2ff43574b565192, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8ecc3734e1594558840ea266c6ee0327, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b28cb8bde25a42c280138f253699d2a8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bffb98e22fb5411c86d36d7fe828c4c3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f214be0bae424d82a678ba0882e23de7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b9bdf47ab97048f7ad4bf10a65cfff5b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d95756b6bf5b4933b2747e611708fe4b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/35d8bf619f204459a517d180a56c0dfd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d381046c34d9467d8a8cacb96af257e8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2a5e5de73f444f86ad815b0f720c41ac, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5b7745e0047c4d58bd2ab822d5174b6c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/75bb5b3a46d0482a9a7b781b046063f6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7dda291fb8b04cb48c8aff53a4e136fc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/4e82485acd3049648a8875a980af18ea, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b55b0a92753c416d9f17c836660ed3d9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f998e8b36e70438c9c4771f8089431fa, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f431f064575046058c96fba48cbecf93, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/047cfc9b6b8e4c2db6a00a35a687bb5b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bf8e92e79d10434ca6b995cbcb3b646c, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2479314c799741a7924495ad0dd1aa03, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/648c0e7bd0ab4c27bef0d4fe94829c8c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/08cf0f4b25db4cc894298617971f7c41, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a0e5ca3aa0b244159f84c81085e96b02] to archive 2024-11-25T17:08:25,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:08:25,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a216b78599644a03a713e555a0667e4f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a216b78599644a03a713e555a0667e4f 2024-11-25T17:08:25,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8be079adbcd749ee8a569dfcbcabee53 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8be079adbcd749ee8a569dfcbcabee53 2024-11-25T17:08:25,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7282eb3bf6ec4351a244666fed03663c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7282eb3bf6ec4351a244666fed03663c 2024-11-25T17:08:25,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/e4cb729e85e94f0583e5757ae0f1f8a9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/e4cb729e85e94f0583e5757ae0f1f8a9 2024-11-25T17:08:25,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d181c0fc5da14b109679fe217291dca5 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d181c0fc5da14b109679fe217291dca5 2024-11-25T17:08:25,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1d94363949f24a0d84ed2e7a367e8bde to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1d94363949f24a0d84ed2e7a367e8bde 2024-11-25T17:08:25,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/6709bdb2f1964b5d8cb472f27264bdeb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/6709bdb2f1964b5d8cb472f27264bdeb 2024-11-25T17:08:25,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5d15fe563ccb4aa6a70288fd09f07933 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5d15fe563ccb4aa6a70288fd09f07933 2024-11-25T17:08:25,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/576edb3098184db88c3fad5ebd0cd1f9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/576edb3098184db88c3fad5ebd0cd1f9 2024-11-25T17:08:25,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a25964cadf1245edac6f6b69475a8519 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a25964cadf1245edac6f6b69475a8519 2024-11-25T17:08:25,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1a5b06d3c41944b1b752c98c501218ac to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/1a5b06d3c41944b1b752c98c501218ac 2024-11-25T17:08:25,624 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/205708f2209f458cb0dc65cbc93934f4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/205708f2209f458cb0dc65cbc93934f4 2024-11-25T17:08:25,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/436bb8f8da774b64a2ff43574b565192 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/436bb8f8da774b64a2ff43574b565192 2024-11-25T17:08:25,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8ecc3734e1594558840ea266c6ee0327 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/8ecc3734e1594558840ea266c6ee0327 2024-11-25T17:08:25,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b28cb8bde25a42c280138f253699d2a8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b28cb8bde25a42c280138f253699d2a8 2024-11-25T17:08:25,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bffb98e22fb5411c86d36d7fe828c4c3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bffb98e22fb5411c86d36d7fe828c4c3 2024-11-25T17:08:25,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f214be0bae424d82a678ba0882e23de7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f214be0bae424d82a678ba0882e23de7 2024-11-25T17:08:25,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b9bdf47ab97048f7ad4bf10a65cfff5b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b9bdf47ab97048f7ad4bf10a65cfff5b 2024-11-25T17:08:25,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d95756b6bf5b4933b2747e611708fe4b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d95756b6bf5b4933b2747e611708fe4b 2024-11-25T17:08:25,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/35d8bf619f204459a517d180a56c0dfd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/35d8bf619f204459a517d180a56c0dfd 2024-11-25T17:08:25,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d381046c34d9467d8a8cacb96af257e8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d381046c34d9467d8a8cacb96af257e8 2024-11-25T17:08:25,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2a5e5de73f444f86ad815b0f720c41ac to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2a5e5de73f444f86ad815b0f720c41ac 2024-11-25T17:08:25,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5b7745e0047c4d58bd2ab822d5174b6c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/5b7745e0047c4d58bd2ab822d5174b6c 2024-11-25T17:08:25,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/75bb5b3a46d0482a9a7b781b046063f6 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/75bb5b3a46d0482a9a7b781b046063f6 2024-11-25T17:08:25,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7dda291fb8b04cb48c8aff53a4e136fc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/7dda291fb8b04cb48c8aff53a4e136fc 2024-11-25T17:08:25,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/4e82485acd3049648a8875a980af18ea to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/4e82485acd3049648a8875a980af18ea 2024-11-25T17:08:25,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b55b0a92753c416d9f17c836660ed3d9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/b55b0a92753c416d9f17c836660ed3d9 2024-11-25T17:08:25,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f998e8b36e70438c9c4771f8089431fa to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f998e8b36e70438c9c4771f8089431fa 2024-11-25T17:08:25,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f431f064575046058c96fba48cbecf93 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/f431f064575046058c96fba48cbecf93 2024-11-25T17:08:25,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/047cfc9b6b8e4c2db6a00a35a687bb5b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/047cfc9b6b8e4c2db6a00a35a687bb5b 2024-11-25T17:08:25,673 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bf8e92e79d10434ca6b995cbcb3b646c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/bf8e92e79d10434ca6b995cbcb3b646c 2024-11-25T17:08:25,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2479314c799741a7924495ad0dd1aa03 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/2479314c799741a7924495ad0dd1aa03 2024-11-25T17:08:25,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/648c0e7bd0ab4c27bef0d4fe94829c8c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/648c0e7bd0ab4c27bef0d4fe94829c8c 2024-11-25T17:08:25,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/08cf0f4b25db4cc894298617971f7c41 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/08cf0f4b25db4cc894298617971f7c41 2024-11-25T17:08:25,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a0e5ca3aa0b244159f84c81085e96b02 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a0e5ca3aa0b244159f84c81085e96b02 2024-11-25T17:08:25,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2e729f8171f14fe49fcd73c35eed26da, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c404eafedd324277a1ca43cffd53d554, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/0cc9aa268a60446aa1d7412bc5ddfb6b, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21ab45edca22440fa4a231e7ff45fe6b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8a1e4e0c56634f519e19bbd6455c086d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/582bcf1db0304ef98e2a0d4ee8ee6b6e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f5468bc8ca2c450d87ecc3ce4d5f78ad, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ee95d568b8c946d7a95db4fd97bbbd8a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/db73f8df4db140f0954bc90dd97207f4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/eaa3be1f0aa5443d8e2e91f5bf655476, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/3e1e0983e6104b2496c3c14cd3be9c89, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/815a08e092b34601bcf917602482f2eb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/68bc0f5743054ad88583985b65b48cf7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2324b6abee6047009d52d44b0bde80ea, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/5a0e34c4165546aebc0605ba36ba5ef7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ecec7d75de3f47ca914b84b7adfd60c3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a4124726d03741d4b056067a3ea2cea5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b02c9b3399214fe0bcd9aa2506390636, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a6955de4ab7940228432460f1c117684, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c9241fc3a2e0401ca9e156f8ef32e803, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/d81aeaa2a0454c8390cb40b31875de01, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b9e0e207118b4f5d86e85ada2e97191d, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/9bf16ad1038d4dfd992fdec1c9ca44ce, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/dcc2e8d2a3c240259b747bd342a19b3f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8c81e843f4e241b3aa9600841965ff82, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f796016cb6ac40f9a13763dfb96764cf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/08f624db275644ce8af56427b524eb6d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b40daf592d474b6e8367d6087bacb5db, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8fb16db9525449aab5ac97f734652590, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/1ea32e8cbd5749d4aef1d1a2345a9dbb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8f93aca8b0384eeabb6b83a65007677f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/6233b61006554586a94e9fde1fe768a7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ede2ae7d1cfb48bda89c89e98297be29, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/fb22abdc8bd248219e94910def1e370d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21b07d7e9f7e439abce9b2c10af6d3b8] to archive 2024-11-25T17:08:25,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
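The entries above and below trace the store-close path for column family B: HStore(2316) hands HFileArchiver the list of compacted store files, and each file is then moved from the region's data directory to the parallel location under archive/. The following is a minimal Java sketch of that move using the Hadoop FileSystem API; the class and method names are hypothetical illustrations, not the org.apache.hadoop.hbase.backup.HFileArchiver implementation, and they only mirror the rename that the "Archived from FileableStoreFile, <src> to <dst>" entries record.

    // Illustrative sketch only: move one store file from .../data/... to the matching
    // .../archive/data/... path, as the log entries around this point show.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveMoveSketch {                        // hypothetical class name
      static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        // Rebuild the same path relative to the cluster root, but under <rootDir>/archive.
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());                    // ensure the archive family dir exists
        if (!fs.rename(storeFile, archived)) {              // a move, not a copy
          throw new IOException("Failed to archive " + storeFile + " to " + archived);
        }
      }

      public static void main(String[] args) throws IOException {
        Path rootDir = new Path("hdfs://localhost:41117/user/jenkins/test-data/"
            + "ffd33fde-3807-89b9-127b-25761d7814f4");
        Path storeFile = new Path(rootDir, "data/default/TestAcidGuarantees/"
            + "140432b4069c8ca485d8f3971c9e31fe/B/2e729f8171f14fe49fcd73c35eed26da");
        FileSystem fs = rootDir.getFileSystem(new Configuration());
        archiveStoreFile(fs, rootDir, storeFile);
      }
    }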
2024-11-25T17:08:25,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2e729f8171f14fe49fcd73c35eed26da to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2e729f8171f14fe49fcd73c35eed26da 2024-11-25T17:08:25,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c404eafedd324277a1ca43cffd53d554 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c404eafedd324277a1ca43cffd53d554 2024-11-25T17:08:25,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/0cc9aa268a60446aa1d7412bc5ddfb6b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/0cc9aa268a60446aa1d7412bc5ddfb6b 2024-11-25T17:08:25,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21ab45edca22440fa4a231e7ff45fe6b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21ab45edca22440fa4a231e7ff45fe6b 2024-11-25T17:08:25,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8a1e4e0c56634f519e19bbd6455c086d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8a1e4e0c56634f519e19bbd6455c086d 2024-11-25T17:08:25,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/582bcf1db0304ef98e2a0d4ee8ee6b6e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/582bcf1db0304ef98e2a0d4ee8ee6b6e 2024-11-25T17:08:25,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f5468bc8ca2c450d87ecc3ce4d5f78ad to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f5468bc8ca2c450d87ecc3ce4d5f78ad 2024-11-25T17:08:25,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ee95d568b8c946d7a95db4fd97bbbd8a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ee95d568b8c946d7a95db4fd97bbbd8a 2024-11-25T17:08:25,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/db73f8df4db140f0954bc90dd97207f4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/db73f8df4db140f0954bc90dd97207f4 2024-11-25T17:08:25,738 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/eaa3be1f0aa5443d8e2e91f5bf655476 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/eaa3be1f0aa5443d8e2e91f5bf655476 2024-11-25T17:08:25,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/3e1e0983e6104b2496c3c14cd3be9c89 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/3e1e0983e6104b2496c3c14cd3be9c89 2024-11-25T17:08:25,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/815a08e092b34601bcf917602482f2eb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/815a08e092b34601bcf917602482f2eb 2024-11-25T17:08:25,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/68bc0f5743054ad88583985b65b48cf7 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/68bc0f5743054ad88583985b65b48cf7 2024-11-25T17:08:25,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2324b6abee6047009d52d44b0bde80ea to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/2324b6abee6047009d52d44b0bde80ea 2024-11-25T17:08:25,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/5a0e34c4165546aebc0605ba36ba5ef7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/5a0e34c4165546aebc0605ba36ba5ef7 2024-11-25T17:08:25,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ecec7d75de3f47ca914b84b7adfd60c3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ecec7d75de3f47ca914b84b7adfd60c3 2024-11-25T17:08:25,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a4124726d03741d4b056067a3ea2cea5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a4124726d03741d4b056067a3ea2cea5 2024-11-25T17:08:25,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b02c9b3399214fe0bcd9aa2506390636 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b02c9b3399214fe0bcd9aa2506390636 2024-11-25T17:08:25,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a6955de4ab7940228432460f1c117684 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/a6955de4ab7940228432460f1c117684 2024-11-25T17:08:25,755 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c9241fc3a2e0401ca9e156f8ef32e803 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/c9241fc3a2e0401ca9e156f8ef32e803 2024-11-25T17:08:25,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/d81aeaa2a0454c8390cb40b31875de01 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/d81aeaa2a0454c8390cb40b31875de01 2024-11-25T17:08:25,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b9e0e207118b4f5d86e85ada2e97191d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b9e0e207118b4f5d86e85ada2e97191d 2024-11-25T17:08:25,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/9bf16ad1038d4dfd992fdec1c9ca44ce to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/9bf16ad1038d4dfd992fdec1c9ca44ce 2024-11-25T17:08:25,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/dcc2e8d2a3c240259b747bd342a19b3f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/dcc2e8d2a3c240259b747bd342a19b3f 2024-11-25T17:08:25,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8c81e843f4e241b3aa9600841965ff82 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8c81e843f4e241b3aa9600841965ff82 2024-11-25T17:08:25,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f796016cb6ac40f9a13763dfb96764cf to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/f796016cb6ac40f9a13763dfb96764cf 2024-11-25T17:08:25,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/08f624db275644ce8af56427b524eb6d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/08f624db275644ce8af56427b524eb6d 2024-11-25T17:08:25,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b40daf592d474b6e8367d6087bacb5db to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/b40daf592d474b6e8367d6087bacb5db 2024-11-25T17:08:25,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8fb16db9525449aab5ac97f734652590 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8fb16db9525449aab5ac97f734652590 2024-11-25T17:08:25,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/1ea32e8cbd5749d4aef1d1a2345a9dbb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/1ea32e8cbd5749d4aef1d1a2345a9dbb 2024-11-25T17:08:25,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8f93aca8b0384eeabb6b83a65007677f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/8f93aca8b0384eeabb6b83a65007677f 2024-11-25T17:08:25,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/6233b61006554586a94e9fde1fe768a7 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/6233b61006554586a94e9fde1fe768a7 2024-11-25T17:08:25,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ede2ae7d1cfb48bda89c89e98297be29 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/ede2ae7d1cfb48bda89c89e98297be29 2024-11-25T17:08:25,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/fb22abdc8bd248219e94910def1e370d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/fb22abdc8bd248219e94910def1e370d 2024-11-25T17:08:25,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21b07d7e9f7e439abce9b2c10af6d3b8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/21b07d7e9f7e439abce9b2c10af6d3b8 2024-11-25T17:08:25,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d4ef9af0d6b42419eac9be978ad2e67, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/efd100a4069b48f2bf82d9c26926584e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e770b63cac054ffeb33db74be560b6d8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/543ef6374d95495a880a7e522913bb9e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cd5c57ab104e4d9b895febde7bc0d0c9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9f86d23738f54701a3109ad6bebc2255, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4e146e178e214ce78cd0345387a10fc4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/f88361ef67624afca17e03dde5833939, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cbb7d54c46a14904bc7474c322b1c88e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/11582c3278634c79a335db9f7ef91785, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/31f8115dbe0445e181b0f8c44934fa94, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e76b672dd37443a4ab5b2e26af0b0578, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/07393e512fca4910b70e3ba3e8b8334b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3e111e18476844d98cc6ee32c360aebd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/d142742e12a04a0c95f21fafd07455f1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/037a3976f9974495ba0075e3a4480e8e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9fd710de6430470ba893eab5b30d9064, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c945ce97f28e43d7a82f2f02d1a1889a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/b72f05efbc574ccb887470fbc7217486, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d6de6484561424191df0c9e3c13fdf8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/8d4f18697dd548d2ad048380d5968da4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a70bb0b1783c4be893235cab49e508d4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3631ad12e3e24a1db37fbdb35ae78a47, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a8f9e31c410a435caab32fd9c6b4a1ca, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/93763932a9f144c0a1799133c90eadfe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/81cc5eadc36e4cd5a6f97beb8f8d3941, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/368987d4759c4d70878a6bd0c275fa0e, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a508d004352d4ef092731f9649784762, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/de954067e5fb4023be303720f4604bff, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c42dc8decc904c58b932dd7873c54c91, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/13db501333a24c9e9de8d5f82deecba5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/39d74f7ac2b84f2cb61c9c174fa2f907, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/963b32787f38412fa0c6ade3fd8e8bd4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9e1821d4d5ca4bb9b7571f14f12c9769] to archive 2024-11-25T17:08:25,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:08:25,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d4ef9af0d6b42419eac9be978ad2e67 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d4ef9af0d6b42419eac9be978ad2e67 2024-11-25T17:08:25,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/efd100a4069b48f2bf82d9c26926584e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/efd100a4069b48f2bf82d9c26926584e 2024-11-25T17:08:25,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e770b63cac054ffeb33db74be560b6d8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e770b63cac054ffeb33db74be560b6d8 2024-11-25T17:08:25,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/543ef6374d95495a880a7e522913bb9e to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/543ef6374d95495a880a7e522913bb9e 2024-11-25T17:08:25,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cd5c57ab104e4d9b895febde7bc0d0c9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cd5c57ab104e4d9b895febde7bc0d0c9 2024-11-25T17:08:25,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9f86d23738f54701a3109ad6bebc2255 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9f86d23738f54701a3109ad6bebc2255 2024-11-25T17:08:25,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4e146e178e214ce78cd0345387a10fc4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4e146e178e214ce78cd0345387a10fc4 2024-11-25T17:08:25,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/f88361ef67624afca17e03dde5833939 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/f88361ef67624afca17e03dde5833939 2024-11-25T17:08:25,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cbb7d54c46a14904bc7474c322b1c88e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cbb7d54c46a14904bc7474c322b1c88e 2024-11-25T17:08:25,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/11582c3278634c79a335db9f7ef91785 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/11582c3278634c79a335db9f7ef91785 2024-11-25T17:08:25,796 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/31f8115dbe0445e181b0f8c44934fa94 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/31f8115dbe0445e181b0f8c44934fa94 2024-11-25T17:08:25,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e76b672dd37443a4ab5b2e26af0b0578 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/e76b672dd37443a4ab5b2e26af0b0578 2024-11-25T17:08:25,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/07393e512fca4910b70e3ba3e8b8334b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/07393e512fca4910b70e3ba3e8b8334b 2024-11-25T17:08:25,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3e111e18476844d98cc6ee32c360aebd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3e111e18476844d98cc6ee32c360aebd 2024-11-25T17:08:25,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/d142742e12a04a0c95f21fafd07455f1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/d142742e12a04a0c95f21fafd07455f1 2024-11-25T17:08:25,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/037a3976f9974495ba0075e3a4480e8e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/037a3976f9974495ba0075e3a4480e8e 2024-11-25T17:08:25,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9fd710de6430470ba893eab5b30d9064 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9fd710de6430470ba893eab5b30d9064 2024-11-25T17:08:25,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c945ce97f28e43d7a82f2f02d1a1889a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c945ce97f28e43d7a82f2f02d1a1889a 2024-11-25T17:08:25,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/b72f05efbc574ccb887470fbc7217486 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/b72f05efbc574ccb887470fbc7217486 2024-11-25T17:08:25,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d6de6484561424191df0c9e3c13fdf8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/2d6de6484561424191df0c9e3c13fdf8 2024-11-25T17:08:25,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/8d4f18697dd548d2ad048380d5968da4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/8d4f18697dd548d2ad048380d5968da4 2024-11-25T17:08:25,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a70bb0b1783c4be893235cab49e508d4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a70bb0b1783c4be893235cab49e508d4 2024-11-25T17:08:25,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3631ad12e3e24a1db37fbdb35ae78a47 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/3631ad12e3e24a1db37fbdb35ae78a47 2024-11-25T17:08:25,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a8f9e31c410a435caab32fd9c6b4a1ca to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a8f9e31c410a435caab32fd9c6b4a1ca 2024-11-25T17:08:25,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/93763932a9f144c0a1799133c90eadfe to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/93763932a9f144c0a1799133c90eadfe 2024-11-25T17:08:25,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/81cc5eadc36e4cd5a6f97beb8f8d3941 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/81cc5eadc36e4cd5a6f97beb8f8d3941 2024-11-25T17:08:25,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/368987d4759c4d70878a6bd0c275fa0e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/368987d4759c4d70878a6bd0c275fa0e 2024-11-25T17:08:25,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a508d004352d4ef092731f9649784762 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/a508d004352d4ef092731f9649784762 2024-11-25T17:08:25,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/de954067e5fb4023be303720f4604bff to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/de954067e5fb4023be303720f4604bff 2024-11-25T17:08:25,821 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c42dc8decc904c58b932dd7873c54c91 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/c42dc8decc904c58b932dd7873c54c91 2024-11-25T17:08:25,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/13db501333a24c9e9de8d5f82deecba5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/13db501333a24c9e9de8d5f82deecba5 2024-11-25T17:08:25,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/39d74f7ac2b84f2cb61c9c174fa2f907 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/39d74f7ac2b84f2cb61c9c174fa2f907 2024-11-25T17:08:25,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/963b32787f38412fa0c6ade3fd8e8bd4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/963b32787f38412fa0c6ade3fd8e8bd4 2024-11-25T17:08:25,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9e1821d4d5ca4bb9b7571f14f12c9769 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/9e1821d4d5ca4bb9b7571f14f12c9769 2024-11-25T17:08:25,830 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/recovered.edits/526.seqid, newMaxSeqId=526, maxSeqId=1 2024-11-25T17:08:25,833 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe. 
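Just before the region is reported closed above, WALSplitUtil(409) writes a marker file recovered.edits/526.seqid with newMaxSeqId=526. The sketch below only parses the sequence id back out of a marker name of that shape; it illustrates the naming seen in the log, not the HBase-internal WAL-splitting logic, and the class name is hypothetical.

    // Parse the max sequence id out of a "<seqid>.seqid" marker name such as "526.seqid".
    import org.apache.hadoop.fs.Path;

    public class SeqIdMarkerSketch {                        // hypothetical class name
      static long parseSeqId(Path marker) {
        String name = marker.getName();                     // e.g. "526.seqid"
        if (!name.endsWith(".seqid")) {
          throw new IllegalArgumentException("not a seqid marker: " + name);
        }
        return Long.parseLong(name.substring(0, name.length() - ".seqid".length()));
      }

      public static void main(String[] args) {
        Path marker = new Path("hdfs://localhost:41117/user/jenkins/test-data/"
            + "ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/"
            + "140432b4069c8ca485d8f3971c9e31fe/recovered.edits/526.seqid");
        System.out.println(parseSeqId(marker));             // prints 526
      }
    }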
2024-11-25T17:08:25,833 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 140432b4069c8ca485d8f3971c9e31fe: 2024-11-25T17:08:25,835 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:25,835 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=140432b4069c8ca485d8f3971c9e31fe, regionState=CLOSED 2024-11-25T17:08:25,838 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-25T17:08:25,838 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 140432b4069c8ca485d8f3971c9e31fe, server=6579369734b6,41865,1732554474464 in 1.6490 sec 2024-11-25T17:08:25,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-25T17:08:25,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=140432b4069c8ca485d8f3971c9e31fe, UNASSIGN in 1.6530 sec 2024-11-25T17:08:25,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-25T17:08:25,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6590 sec 2024-11-25T17:08:25,842 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554505842"}]},"ts":"1732554505842"} 2024-11-25T17:08:25,844 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-25T17:08:25,846 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-25T17:08:25,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6750 sec 2024-11-25T17:08:26,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-25T17:08:26,281 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-25T17:08:26,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-25T17:08:26,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:26,289 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:26,291 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:26,291 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-25T17:08:26,294 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:26,298 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/recovered.edits] 2024-11-25T17:08:26,301 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/550dec81b4e24943968af2d3533de4de to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/550dec81b4e24943968af2d3533de4de 2024-11-25T17:08:26,303 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a55dbc19465247cdaa84f327a4c7a6f2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/a55dbc19465247cdaa84f327a4c7a6f2 2024-11-25T17:08:26,305 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d0165a66fc0649e3b61a42c415d446c5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/A/d0165a66fc0649e3b61a42c415d446c5 2024-11-25T17:08:26,307 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/7b92d10a0a274e1ea804c720ea2f3260 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/7b92d10a0a274e1ea804c720ea2f3260 2024-11-25T17:08:26,309 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/df5297d688bd4935a3541a90b4db4f5a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/df5297d688bd4935a3541a90b4db4f5a 
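
The surrounding entries record the master-side flow of a client disable-then-delete: DisableTableProcedure pid=34 completes, the client issues the delete, and DeleteTableProcedure pid=38 archives the region directories before removing the table from hbase:meta. For orientation, the client-side calls that trigger this flow look roughly like the sketch below; it uses only the standard Admin API and is not the test's own teardown code (configuration comes from whatever hbase-site.xml is on the classpath).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            // A table must be disabled before it can be deleted; both calls block until
            // the corresponding master procedure (as logged here) has finished.
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);
            }
            admin.deleteTable(tn);
          }
        }
      }
    }
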
2024-11-25T17:08:26,310 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/e0ad748dd5b8436b9b091240bd4c711a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/B/e0ad748dd5b8436b9b091240bd4c711a 2024-11-25T17:08:26,313 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4dfbfe5597a4448b8233ca37f6af14cd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/4dfbfe5597a4448b8233ca37f6af14cd 2024-11-25T17:08:26,314 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cc033358e9914faa98c3a5825bb278df to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/cc033358e9914faa98c3a5825bb278df 2024-11-25T17:08:26,315 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/db0cae705db744dbb3cf6a1aaa6e9674 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/C/db0cae705db744dbb3cf6a1aaa6e9674 2024-11-25T17:08:26,318 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/recovered.edits/526.seqid to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe/recovered.edits/526.seqid 2024-11-25T17:08:26,319 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/140432b4069c8ca485d8f3971c9e31fe 2024-11-25T17:08:26,319 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-25T17:08:26,325 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:26,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-25T17:08:26,333 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-25T17:08:26,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-25T17:08:26,406 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-25T17:08:26,409 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:26,409 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-25T17:08:26,410 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732554506409"}]},"ts":"9223372036854775807"} 2024-11-25T17:08:26,420 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-25T17:08:26,420 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 140432b4069c8ca485d8f3971c9e31fe, NAME => 'TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe.', STARTKEY => '', ENDKEY => ''}] 2024-11-25T17:08:26,420 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-25T17:08:26,421 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732554506420"}]},"ts":"9223372036854775807"} 2024-11-25T17:08:26,424 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-25T17:08:26,427 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:26,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 142 msec 2024-11-25T17:08:26,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-25T17:08:26,606 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-25T17:08:26,621 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 218) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6579369734b6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x327ba634-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;6579369734b6:41865-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6579369734b6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x327ba634-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1427976722_22 at /127.0.0.1:43944 [Waiting for operation #360] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400252136_22 at /127.0.0.1:49064 [Waiting for operation #343] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x327ba634-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x327ba634-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=650 (was 582) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2214 (was 3313) 2024-11-25T17:08:26,631 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=650, ProcessCount=11, AvailableMemoryMB=2214 2024-11-25T17:08:26,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-25T17:08:26,633 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:08:26,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:26,635 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T17:08:26,635 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:26,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-11-25T17:08:26,636 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T17:08:26,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-25T17:08:26,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741952_1128 (size=963) 2024-11-25T17:08:26,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-25T17:08:26,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-25T17:08:27,045 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:08:27,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741953_1129 (size=53) 2024-11-25T17:08:27,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-25T17:08:27,451 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:08:27,451 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c60040e46a02e10beaa963566dc1e39f, disabling compactions & flushes 2024-11-25T17:08:27,451 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:27,451 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:27,451 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. after waiting 0 ms 2024-11-25T17:08:27,451 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:27,451 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
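
The HMaster 'create TestAcidGuarantees' entry above shows the schema used for this run: three column families A, B and C with VERSIONS => '1', the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', and (per the earlier TableDescriptorChecker warning) a memstore flush size of 131072 bytes. A rough client-side equivalent built with the public descriptor builders is sketched below; it is illustrative only, and where the test actually sets the flush size is not visible in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table-level attribute shown in the create log entry above.
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                  // The 131072-byte value the TableDescriptorChecker warning flags as too small;
                  // shown here only to illustrate where such a setting would live.
                  .setMemStoreFlushSize(131072L);
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)  // VERSIONS => '1' in the logged descriptor
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }
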
2024-11-25T17:08:27,451 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:27,452 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T17:08:27,453 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732554507452"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732554507452"}]},"ts":"1732554507452"} 2024-11-25T17:08:27,454 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-25T17:08:27,455 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T17:08:27,455 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554507455"}]},"ts":"1732554507455"} 2024-11-25T17:08:27,456 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-25T17:08:27,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, ASSIGN}] 2024-11-25T17:08:27,461 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, ASSIGN 2024-11-25T17:08:27,461 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, ASSIGN; state=OFFLINE, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=false 2024-11-25T17:08:27,612 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:27,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:08:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-25T17:08:27,764 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:27,768 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:27,768 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:08:27,769 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,769 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:08:27,769 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,769 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,770 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,772 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:27,772 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c60040e46a02e10beaa963566dc1e39f columnFamilyName A 2024-11-25T17:08:27,772 DEBUG [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:27,773 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(327): Store=c60040e46a02e10beaa963566dc1e39f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:27,773 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,774 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:27,775 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c60040e46a02e10beaa963566dc1e39f columnFamilyName B 2024-11-25T17:08:27,776 DEBUG [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:27,776 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(327): Store=c60040e46a02e10beaa963566dc1e39f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:27,776 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,777 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:27,778 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c60040e46a02e10beaa963566dc1e39f columnFamilyName C 2024-11-25T17:08:27,778 DEBUG [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:27,778 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(327): Store=c60040e46a02e10beaa963566dc1e39f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:27,778 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:27,779 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,779 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,781 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:08:27,782 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:27,784 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:08:27,785 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened c60040e46a02e10beaa963566dc1e39f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60328782, jitterRate=-0.101031094789505}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:08:27,786 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:27,787 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., pid=41, masterSystemTime=1732554507764 2024-11-25T17:08:27,788 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:27,789 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
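[Editor's note] The store openings above show each column family (A, B, C) of region c60040e46a02e10beaa963566dc1e39f backed by a CompactingMemStore with the ADAPTIVE in-memory compaction policy, matching the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that appears in the modify-table request later in this log. A minimal, hedged sketch of how a table with this shape might be declared through the public HBase 2.x Admin API (table and family names are taken from the log; everything else is illustrative, not the test's actual setup code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveMemstoreTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute seen in the log; ADAPTIVE selects the adaptive
              // in-memory compaction variant of CompactingMemStore.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
            // Per-family equivalent of the ADAPTIVE policy logged for stores A/B/C.
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
        table.setColumnFamily(cf);
      }
      admin.createTable(table.build());
    }
  }
}
```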
2024-11-25T17:08:27,789 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:27,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-25T17:08:27,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 in 177 msec 2024-11-25T17:08:27,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-25T17:08:27,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, ASSIGN in 332 msec 2024-11-25T17:08:27,794 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T17:08:27,795 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554507794"}]},"ts":"1732554507794"} 2024-11-25T17:08:27,795 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-25T17:08:27,799 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T17:08:27,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1660 sec 2024-11-25T17:08:28,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-25T17:08:28,742 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-25T17:08:28,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x546bf8b8 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5caaf139 2024-11-25T17:08:28,747 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e560c7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:28,748 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:28,750 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:28,752 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T17:08:28,753 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48050, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T17:08:28,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-25T17:08:28,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:08:28,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:28,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741954_1130 (size=999) 2024-11-25T17:08:29,177 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-25T17:08:29,177 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-25T17:08:29,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-25T17:08:29,189 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, REOPEN/MOVE}] 2024-11-25T17:08:29,189 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, REOPEN/MOVE 2024-11-25T17:08:29,190 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,191 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:08:29,191 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:08:29,342 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:29,343 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,343 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:08:29,343 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing c60040e46a02e10beaa963566dc1e39f, disabling compactions & flushes 2024-11-25T17:08:29,343 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:29,343 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:29,343 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. after waiting 0 ms 2024-11-25T17:08:29,343 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:29,347 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-25T17:08:29,347 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:29,347 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:29,347 WARN [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: c60040e46a02e10beaa963566dc1e39f to self. 2024-11-25T17:08:29,349 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,350 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=CLOSED 2024-11-25T17:08:29,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-25T17:08:29,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 in 160 msec 2024-11-25T17:08:29,353 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, REOPEN/MOVE; state=CLOSED, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=true 2024-11-25T17:08:29,503 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:08:29,657 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:29,660 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
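[Editor's note] The ModifyTableProcedure recorded above changes family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is what drives the REOPEN/MOVE of the region that has just been closed and is now being reopened. A hedged sketch of how such a modification could be issued through the HBase 2.x Admin API (table and family names come from the log; the surrounding Admin handle is assumed):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class EnableMobOnFamilyA {
  // Assumes an already-open Admin handle; names follow the log.
  static void enableMob(Admin admin) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
        .setMobThreshold(4L)   // MOB_THRESHOLD => '4': cells above 4 bytes go to MOB files
        .build();
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build();
    // Internally this runs a ModifyTableProcedure and reopens the table's regions,
    // matching the ReopenTableRegions / REOPEN-MOVE sequence in the log.
    admin.modifyTable(modified);
  }
}
```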
2024-11-25T17:08:29,660 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:08:29,661 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,661 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:08:29,661 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,661 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,663 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,664 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:29,670 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c60040e46a02e10beaa963566dc1e39f columnFamilyName A 2024-11-25T17:08:29,672 DEBUG [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:29,673 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(327): Store=c60040e46a02e10beaa963566dc1e39f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:29,673 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,674 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:29,674 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c60040e46a02e10beaa963566dc1e39f columnFamilyName B 2024-11-25T17:08:29,674 DEBUG [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:29,675 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(327): Store=c60040e46a02e10beaa963566dc1e39f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:29,675 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,675 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:29,676 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c60040e46a02e10beaa963566dc1e39f columnFamilyName C 2024-11-25T17:08:29,676 DEBUG [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:29,676 INFO [StoreOpener-c60040e46a02e10beaa963566dc1e39f-1 {}] regionserver.HStore(327): Store=c60040e46a02e10beaa963566dc1e39f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:29,676 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:29,677 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,678 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,679 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:08:29,681 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,681 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened c60040e46a02e10beaa963566dc1e39f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64841691, jitterRate=-0.03378351032733917}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:08:29,682 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:29,683 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., pid=46, masterSystemTime=1732554509657 2024-11-25T17:08:29,684 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:29,685 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
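[Editor's note] The FlushLargeStoresPolicy message in the reopen above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the region falls back to its memstore flush size divided by the number of families (16.0 M here, i.e. flushSizeLowerBound=16777216). A hedged sketch of the two places that bound could be supplied, using the key exactly as logged; the 16 MB values below are illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class PerFamilyFlushBoundExample {
  public static void main(String[] args) {
    // Cluster-wide setting (hbase-site.xml equivalent); example value only.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

    // Per-table override in the table descriptor, which is where the logged
    // FlushLargeStoresPolicy check looked first before falling back.
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024));
    System.out.println(builder.build()
        .getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}
```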
2024-11-25T17:08:29,685 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=OPEN, openSeqNum=5, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-11-25T17:08:29,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 in 181 msec 2024-11-25T17:08:29,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-25T17:08:29,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, REOPEN/MOVE in 499 msec 2024-11-25T17:08:29,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-25T17:08:29,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 511 msec 2024-11-25T17:08:29,696 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 933 msec 2024-11-25T17:08:29,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-25T17:08:29,705 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2089ec29 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bcbdbdb 2024-11-25T17:08:29,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46c2c778, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,712 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672 2024-11-25T17:08:29,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433e2b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,717 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x491ea2ee to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b55744e 2024-11-25T17:08:29,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,720 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 
127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431 2024-11-25T17:08:29,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f64590, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,725 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-11-25T17:08:29,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46114993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,731 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465dc764 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4c53ed 2024-11-25T17:08:29,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367f47f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00cb464a to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68f0be85 2024-11-25T17:08:29,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@247c0c93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,739 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78cafade to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@152377d4 2024-11-25T17:08:29,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517ff977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,742 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-11-25T17:08:29,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:29,748 DEBUG 
[hconnection-0x7b9ff4cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,749 DEBUG [hconnection-0x6717d53b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,749 DEBUG [hconnection-0x68b772af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,750 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45650, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,750 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,751 DEBUG [hconnection-0x5148cc5b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,752 DEBUG [hconnection-0x5b825a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,753 DEBUG [hconnection-0x5ac15ae3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,753 DEBUG [hconnection-0x3db62a21-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,753 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,753 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,753 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45678, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,754 DEBUG [hconnection-0x3b6534b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,754 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45696, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,754 DEBUG [hconnection-0x477ee78f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:29,755 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45698, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,755 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:29,755 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45694, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:29,755 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
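[Editor's note] The master entry above ('Client=jenkins//172.17.0.3 flush TestAcidGuarantees') shows a client-requested flush, which becomes the FlushTableProcedure stored as pid=47 just below, while the concurrent writers begin hitting RegionTooBusyException once the region exceeds its memstore limit (512.0 K in this test). A hedged sketch of what that client side might look like, triggering the flush and backing off when a put is rejected as too busy; the retry loop and its parameters are assumptions for illustration, not the test's actual code:

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class FlushAndRetryPut {
  // Assumes an open Connection; table, family, and row names follow the log.
  static void flushAndWrite(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
      admin.flush(name); // drives the FlushTableProcedure recorded as pid=47
    }
    try (Table table = conn.getTable(name)) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // With client retries effectively disabled (maxRetries=0 in the logged RPC
      // client config), an over-limit region can surface RegionTooBusyException to
      // the caller; back off briefly and retry a few times (illustrative policy).
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}
```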
2024-11-25T17:08:29,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-25T17:08:29,758 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:29,759 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:29,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:29,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-25T17:08:29,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:08:29,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:29,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:29,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554569823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554569826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554569823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554569831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554569831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112500373dc86e9548c09333b75cf391093f_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554509766/Put/seqid=0 2024-11-25T17:08:29,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-25T17:08:29,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741955_1131 (size=12154) 2024-11-25T17:08:29,908 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:29,912 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:29,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-25T17:08:29,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:29,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
as already flushing 2024-11-25T17:08:29,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:29,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:29,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:29,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:29,914 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112500373dc86e9548c09333b75cf391093f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112500373dc86e9548c09333b75cf391093f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:29,916 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a2bb12651244c3db5dd04d0cc936e87, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:29,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a2bb12651244c3db5dd04d0cc936e87 is 175, key is test_row_0/A:col10/1732554509766/Put/seqid=0 2024-11-25T17:08:29,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554569932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554569935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554569936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:29,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554569936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554569936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:29,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741956_1132 (size=30955) 2024-11-25T17:08:29,959 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a2bb12651244c3db5dd04d0cc936e87 2024-11-25T17:08:29,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/e7d9962c1039411cb0ddc79f25f71e78 is 50, key is test_row_0/B:col10/1732554509766/Put/seqid=0 2024-11-25T17:08:30,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741957_1133 (size=12001) 2024-11-25T17:08:30,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-25T17:08:30,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:30,067 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-25T17:08:30,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:30,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:30,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:30,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
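Note on the entries above: procedure pid=48 keeps failing because FlushRegionCallable finds the region already mid-flush ("NOT flushing ... as already flushing") and reports the IOException back to the master, which re-dispatches the flush; in parallel, Mutate calls are rejected with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K here) and stay rejected until a flush drains enough of the memstore. The Java sketch below is only a minimal illustration of a writer tolerating that back-pressure; it is not taken from the test. The class name, the retry/back-off values, and the lowered memstore settings (flush size * block multiplier = 512 K) are assumptions; the table name, column family A, and row key test_row_0 mirror the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressurePutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The 512 K blocking limit seen in the log is flush size * block multiplier.
        // The exact values the test uses are not shown here; these are illustrative.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // assumption: 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // assumption: 4 * 128 KB = 512 KB

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            // The stock HBase client already retries internally; depending on retry settings
            // the busy signal may only surface after those retries. The explicit loop here
            // just makes the back-off visible for the example.
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;                   // write accepted once a flush has drained the memstore
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMs); // give MemStoreFlusher time to catch up
                    backoffMs *= 2;
                }
            }
        }
    }
}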
2024-11-25T17:08:30,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554570137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554570140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554570141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554570141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554570141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,219 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:30,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-25T17:08:30,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:30,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:30,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:30,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-25T17:08:30,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:30,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-25T17:08:30,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:30,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:30,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:30,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:30,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/e7d9962c1039411cb0ddc79f25f71e78 2024-11-25T17:08:30,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554570440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/587f8d0b39eb47bbb74680fdc07beb02 is 50, key is test_row_0/C:col10/1732554509766/Put/seqid=0 2024-11-25T17:08:30,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554570443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554570445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554570445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554570445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741958_1134 (size=12001) 2024-11-25T17:08:30,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/587f8d0b39eb47bbb74680fdc07beb02 2024-11-25T17:08:30,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a2bb12651244c3db5dd04d0cc936e87 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a2bb12651244c3db5dd04d0cc936e87 2024-11-25T17:08:30,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a2bb12651244c3db5dd04d0cc936e87, entries=150, sequenceid=16, filesize=30.2 K 2024-11-25T17:08:30,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/e7d9962c1039411cb0ddc79f25f71e78 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/e7d9962c1039411cb0ddc79f25f71e78 2024-11-25T17:08:30,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/e7d9962c1039411cb0ddc79f25f71e78, entries=150, sequenceid=16, filesize=11.7 K 2024-11-25T17:08:30,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/587f8d0b39eb47bbb74680fdc07beb02 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/587f8d0b39eb47bbb74680fdc07beb02 2024-11-25T17:08:30,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/587f8d0b39eb47bbb74680fdc07beb02, entries=150, sequenceid=16, filesize=11.7 K 2024-11-25T17:08:30,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c60040e46a02e10beaa963566dc1e39f in 731ms, sequenceid=16, compaction requested=false 2024-11-25T17:08:30,501 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-25T17:08:30,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:30,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:30,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-25T17:08:30,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:30,532 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:08:30,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:30,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:30,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:30,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:30,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:30,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:30,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125e93f3f6c279c47e1b3b46c94b5362f8f_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554509808/Put/seqid=0 2024-11-25T17:08:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741959_1135 (size=12154) 2024-11-25T17:08:30,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-25T17:08:30,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:30,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:30,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554570956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554570957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554570957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554570959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554570960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:30,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:30,996 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125e93f3f6c279c47e1b3b46c94b5362f8f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e93f3f6c279c47e1b3b46c94b5362f8f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:30,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/2fbc3e6ce00f4dc1a7e496bda8395767, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:30,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/2fbc3e6ce00f4dc1a7e496bda8395767 is 175, key is test_row_0/A:col10/1732554509808/Put/seqid=0 2024-11-25T17:08:31,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741960_1136 (size=30955) 2024-11-25T17:08:31,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554571061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554571061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554571062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554571063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554571063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554571265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554571266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554571267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554571268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554571273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,375 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T17:08:31,380 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T17:08:31,381 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T17:08:31,405 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/2fbc3e6ce00f4dc1a7e496bda8395767 2024-11-25T17:08:31,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/cd03b6a0d383465b9bd75e59b9e839a4 is 50, key is test_row_0/B:col10/1732554509808/Put/seqid=0 2024-11-25T17:08:31,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741961_1137 (size=12001) 2024-11-25T17:08:31,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554571569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554571569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554571570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554571573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554571576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:31,833 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/cd03b6a0d383465b9bd75e59b9e839a4 2024-11-25T17:08:31,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/c4e8b18614ad4d0ea8244b2d29cebd5c is 50, key is test_row_0/C:col10/1732554509808/Put/seqid=0 2024-11-25T17:08:31,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741962_1138 (size=12001) 2024-11-25T17:08:31,856 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/c4e8b18614ad4d0ea8244b2d29cebd5c 2024-11-25T17:08:31,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/2fbc3e6ce00f4dc1a7e496bda8395767 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2fbc3e6ce00f4dc1a7e496bda8395767
2024-11-25T17:08:31,868 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2fbc3e6ce00f4dc1a7e496bda8395767, entries=150, sequenceid=41, filesize=30.2 K
2024-11-25T17:08:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47
2024-11-25T17:08:31,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/cd03b6a0d383465b9bd75e59b9e839a4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/cd03b6a0d383465b9bd75e59b9e839a4
2024-11-25T17:08:31,875 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/cd03b6a0d383465b9bd75e59b9e839a4, entries=150, sequenceid=41, filesize=11.7 K
2024-11-25T17:08:31,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/c4e8b18614ad4d0ea8244b2d29cebd5c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c4e8b18614ad4d0ea8244b2d29cebd5c
2024-11-25T17:08:31,883 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c4e8b18614ad4d0ea8244b2d29cebd5c, entries=150, sequenceid=41, filesize=11.7 K
2024-11-25T17:08:31,885 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for c60040e46a02e10beaa963566dc1e39f in 1353ms, sequenceid=41, compaction requested=false
2024-11-25T17:08:31,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f:
2024-11-25T17:08:31,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.
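At this point the pid=48 flush has committed store files for all three families and reports currentSize=60.38 KB for the region, and the RegionTooBusyException warnings pause until fresh writes refill the memstore. Every rejected call above cites the same threshold, "Over memstore limit=512.0 K", which HRegion.checkResources enforces against the region's un-flushed data. The sketch below is not taken from this test; assuming the standard HBase property names, it only illustrates how such a 512 K blocking threshold is typically derived from a deliberately small flush size in a test configuration.

```java
// Illustrative sketch, not part of this log: deriving the per-region blocking
// threshold that appears as "Over memstore limit=512.0 K". The property names are
// the standard HBase keys; the 128 KB fallback is an assumption chosen so that
// flush size x block multiplier matches the 512 K limit reported above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingSize = flushSize * multiplier; // writes fail fast above this size
        System.out.printf("blocking memstore size = %d bytes (%.1f K)%n",
                blockingSize, blockingSize / 1024.0);
    }
}
```

With values like these the region starts rejecting writes once roughly 512 KB of data is waiting to be flushed; the rejections subside after a flush commits and resume as soon as the next burst of puts refills the memstore, which is the pattern visible again at 17:08:32 below.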
2024-11-25T17:08:31,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48
2024-11-25T17:08:31,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=48
2024-11-25T17:08:31,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47
2024-11-25T17:08:31,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1280 sec
2024-11-25T17:08:31,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.1360 sec
2024-11-25T17:08:32,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f
2024-11-25T17:08:32,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-11-25T17:08:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A
2024-11-25T17:08:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B
2024-11-25T17:08:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C
2024-11-25T17:08:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:32,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125807f81f1441746d7b8c732d3022a0842_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554512074/Put/seqid=0
2024-11-25T17:08:32,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741963_1139 (size=17034)
2024-11-25T17:08:32,108 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:32,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554572105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554572107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554572109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554572109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554572110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,115 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125807f81f1441746d7b8c732d3022a0842_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125807f81f1441746d7b8c732d3022a0842_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:32,116 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/216b64a17ac949bbb5431cfb92700068, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:32,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/216b64a17ac949bbb5431cfb92700068 is 175, key is test_row_0/A:col10/1732554512074/Put/seqid=0 2024-11-25T17:08:32,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741964_1140 (size=48139) 2024-11-25T17:08:32,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554572210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554572213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554572213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554572213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554572214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554572416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554572419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554572419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554572419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554572420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,562 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/216b64a17ac949bbb5431cfb92700068 2024-11-25T17:08:32,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/9eed1923d6e2413796c274671c9b27ec is 50, key is test_row_0/B:col10/1732554512074/Put/seqid=0 2024-11-25T17:08:32,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741965_1141 (size=12001) 2024-11-25T17:08:32,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/9eed1923d6e2413796c274671c9b27ec 2024-11-25T17:08:32,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/a747d79dd8074779b7f6b6941c2c0fba is 50, key is test_row_0/C:col10/1732554512074/Put/seqid=0 2024-11-25T17:08:32,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741966_1142 (size=12001) 2024-11-25T17:08:32,722 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554572721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554572722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554572724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554572727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:32,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:32,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554572728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/a747d79dd8074779b7f6b6941c2c0fba 2024-11-25T17:08:33,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/216b64a17ac949bbb5431cfb92700068 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/216b64a17ac949bbb5431cfb92700068 2024-11-25T17:08:33,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/216b64a17ac949bbb5431cfb92700068, entries=250, sequenceid=55, filesize=47.0 K 2024-11-25T17:08:33,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/9eed1923d6e2413796c274671c9b27ec as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/9eed1923d6e2413796c274671c9b27ec 2024-11-25T17:08:33,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/9eed1923d6e2413796c274671c9b27ec, entries=150, sequenceid=55, filesize=11.7 K 2024-11-25T17:08:33,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/a747d79dd8074779b7f6b6941c2c0fba as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/a747d79dd8074779b7f6b6941c2c0fba 2024-11-25T17:08:33,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/a747d79dd8074779b7f6b6941c2c0fba, entries=150, sequenceid=55, filesize=11.7 K 2024-11-25T17:08:33,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for c60040e46a02e10beaa963566dc1e39f in 954ms, sequenceid=55, compaction requested=true 2024-11-25T17:08:33,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:33,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:33,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:33,031 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:33,031 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:33,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:33,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:33,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:33,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:33,047 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:33,047 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/A is initiating minor compaction (all files) 2024-11-25T17:08:33,047 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/A in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
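The repeated RegionTooBusyException: Over memstore limit=512.0 K records above are the region server's write back-pressure: HRegion.checkResources rejects incoming Mutate calls once the region's memstore passes its blocking limit (the configured flush size times hbase.hregion.memstore.block.multiplier; this run has evidently tuned the limit down to 512 K to provoke the condition), and writes proceed again once MemStoreFlusher drains the region, as the flush summary just above shows. The Java sketch below is illustrative only and is not part of this log or of TestAcidGuarantees: it shows one way a client could absorb such rejections with its own back-off. The class and helper names are invented for the example, and the stock HBase client already retries RegionTooBusyException internally, so the raw exception normally only reaches application code when client retries are reduced (for example hbase.client.retries.number=1).

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only: retries a single Put when the region reports
// RegionTooBusyException (memstore over its blocking limit), backing off so the
// MemStoreFlusher has time to drain the region before the next attempt.
public class BusyRegionRetrySketch {

  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long backoffMs = 100;
    for (int attempt = 1;; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after maxAttempts rejections
        }
        Thread.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 5_000); // exponential back-off, capped at 5 s
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the rows seen in this log (test_row_0, family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 10);
    }
  }
}
```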
2024-11-25T17:08:33,047 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a2bb12651244c3db5dd04d0cc936e87, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2fbc3e6ce00f4dc1a7e496bda8395767, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/216b64a17ac949bbb5431cfb92700068] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=107.5 K 2024-11-25T17:08:33,047 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:33,047 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a2bb12651244c3db5dd04d0cc936e87, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2fbc3e6ce00f4dc1a7e496bda8395767, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/216b64a17ac949bbb5431cfb92700068] 2024-11-25T17:08:33,048 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:33,048 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/B is initiating minor compaction (all files) 2024-11-25T17:08:33,048 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/B in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:33,048 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/e7d9962c1039411cb0ddc79f25f71e78, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/cd03b6a0d383465b9bd75e59b9e839a4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/9eed1923d6e2413796c274671c9b27ec] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=35.2 K 2024-11-25T17:08:33,048 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a2bb12651244c3db5dd04d0cc936e87, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732554509766 2024-11-25T17:08:33,049 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e7d9962c1039411cb0ddc79f25f71e78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732554509766 2024-11-25T17:08:33,049 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fbc3e6ce00f4dc1a7e496bda8395767, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732554509808 2024-11-25T17:08:33,050 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cd03b6a0d383465b9bd75e59b9e839a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732554509808 2024-11-25T17:08:33,050 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 216b64a17ac949bbb5431cfb92700068, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732554510954 2024-11-25T17:08:33,050 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9eed1923d6e2413796c274671c9b27ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732554510957 2024-11-25T17:08:33,063 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:33,064 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#B#compaction#122 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:33,065 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/7bee49e7cd294be2907d6ccc193be33d is 50, key is test_row_0/B:col10/1732554512074/Put/seqid=0 2024-11-25T17:08:33,092 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125dcf8714b2f0a45feabfffd506233164d_c60040e46a02e10beaa963566dc1e39f store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:33,099 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125dcf8714b2f0a45feabfffd506233164d_c60040e46a02e10beaa963566dc1e39f, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:33,099 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125dcf8714b2f0a45feabfffd506233164d_c60040e46a02e10beaa963566dc1e39f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:33,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741967_1143 (size=12104) 2024-11-25T17:08:33,110 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/7bee49e7cd294be2907d6ccc193be33d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/7bee49e7cd294be2907d6ccc193be33d 2024-11-25T17:08:33,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741968_1144 (size=4469) 2024-11-25T17:08:33,123 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/B of c60040e46a02e10beaa963566dc1e39f into 7bee49e7cd294be2907d6ccc193be33d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
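The store-file bookkeeping above (SortedCompactionPolicy "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", followed by a minor compaction of all three flush outputs into a single file per store) is governed by per-store thresholds. The short sketch below only reads the relevant settings; the fallback values shown are the usual defaults (3 files to consider a minor compaction, 16 store files before writes to the region are blocked, at most 10 files per compaction) and line up with the counts in the log, but the class name is invented and this is not a dump of the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: reads the per-store compaction thresholds that drive the
// "3 eligible, 16 blocking" style selection messages seen in this log.
public class CompactionThresholdSketch {

  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum number of eligible store files before a minor compaction is considered.
    int minorThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

    // Store-file count above which updates to the region are blocked until
    // compaction catches up (or the blocking wait time expires).
    int blockingStoreFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);

    // Upper bound on how many files a single minor compaction may pick up.
    int maxFilesPerCompaction = conf.getInt("hbase.hstore.compaction.max", 10);

    System.out.printf(
        "minor compaction threshold=%d, blocking store files=%d, max files per compaction=%d%n",
        minorThreshold, blockingStoreFiles, maxFilesPerCompaction);
  }
}
```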
2024-11-25T17:08:33,123 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:33,123 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/B, priority=13, startTime=1732554513031; duration=0sec 2024-11-25T17:08:33,123 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:33,123 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:B 2024-11-25T17:08:33,123 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:33,125 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#A#compaction#123 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:33,125 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:33,125 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/C is initiating minor compaction (all files) 2024-11-25T17:08:33,125 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/C in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:33,125 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/587f8d0b39eb47bbb74680fdc07beb02, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c4e8b18614ad4d0ea8244b2d29cebd5c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/a747d79dd8074779b7f6b6941c2c0fba] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=35.2 K 2024-11-25T17:08:33,127 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/2da7bfca8e3343d99872cf7a0f474ce9 is 175, key is test_row_0/A:col10/1732554512074/Put/seqid=0 2024-11-25T17:08:33,129 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 587f8d0b39eb47bbb74680fdc07beb02, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732554509766 2024-11-25T17:08:33,130 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c4e8b18614ad4d0ea8244b2d29cebd5c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732554509808 2024-11-25T17:08:33,130 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a747d79dd8074779b7f6b6941c2c0fba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732554510957 2024-11-25T17:08:33,140 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#C#compaction#124 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:33,141 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/5374b1c66bd04d3a8cdf8cbb2e34eb4e is 50, key is test_row_0/C:col10/1732554512074/Put/seqid=0 2024-11-25T17:08:33,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741969_1145 (size=31058) 2024-11-25T17:08:33,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741970_1146 (size=12104) 2024-11-25T17:08:33,192 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/5374b1c66bd04d3a8cdf8cbb2e34eb4e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/5374b1c66bd04d3a8cdf8cbb2e34eb4e 2024-11-25T17:08:33,198 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/C of c60040e46a02e10beaa963566dc1e39f into 5374b1c66bd04d3a8cdf8cbb2e34eb4e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:33,198 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:33,198 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/C, priority=13, startTime=1732554513031; duration=0sec 2024-11-25T17:08:33,199 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:33,199 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:C 2024-11-25T17:08:33,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:33,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-25T17:08:33,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:33,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:33,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:33,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:33,228 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:33,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:33,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554573246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554573249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554573250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554573251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554573253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b546b59465ba456190f4ac2bcc6fa678_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554512106/Put/seqid=0 2024-11-25T17:08:33,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741971_1147 (size=17034) 2024-11-25T17:08:33,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554573356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554573356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554573356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554573356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554573356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554573561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554573561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554573562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554573562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554573563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,577 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/2da7bfca8e3343d99872cf7a0f474ce9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2da7bfca8e3343d99872cf7a0f474ce9 2024-11-25T17:08:33,583 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/A of c60040e46a02e10beaa963566dc1e39f into 2da7bfca8e3343d99872cf7a0f474ce9(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:33,583 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:33,583 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/A, priority=13, startTime=1732554513030; duration=0sec 2024-11-25T17:08:33,583 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:33,583 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:A 2024-11-25T17:08:33,683 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:33,688 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b546b59465ba456190f4ac2bcc6fa678_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b546b59465ba456190f4ac2bcc6fa678_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:33,689 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ab3dabf5b5f24a7c937bb68f1dd0fc70, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:33,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ab3dabf5b5f24a7c937bb68f1dd0fc70 is 175, key is test_row_0/A:col10/1732554512106/Put/seqid=0 2024-11-25T17:08:33,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741972_1148 (size=48139) 2024-11-25T17:08:33,697 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ab3dabf5b5f24a7c937bb68f1dd0fc70 2024-11-25T17:08:33,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/d8ac8ae1798b46fea63c34cf888d1f71 is 50, key is test_row_0/B:col10/1732554512106/Put/seqid=0 2024-11-25T17:08:33,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741973_1149 (size=12001) 2024-11-25T17:08:33,866 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554573865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554573866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554573866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-25T17:08:33,869 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-25T17:08:33,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:33,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554573869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554573870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:33,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:33,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-25T17:08:33,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-25T17:08:33,872 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:33,874 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:33,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:33,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-25T17:08:34,025 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:34,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-25T17:08:34,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:34,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:34,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:34,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:34,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/d8ac8ae1798b46fea63c34cf888d1f71 2024-11-25T17:08:34,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/7244f830ee1c44beb950964072c9bc83 is 50, key is test_row_0/C:col10/1732554512106/Put/seqid=0 2024-11-25T17:08:34,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-25T17:08:34,178 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:34,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-25T17:08:34,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741974_1150 (size=12001) 2024-11-25T17:08:34,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:34,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:34,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,331 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:34,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-25T17:08:34,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:34,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554574370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:34,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:34,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554574372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:34,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:34,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554574372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:34,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:34,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554574372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:34,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:34,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554574374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:34,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-25T17:08:34,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:34,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-25T17:08:34,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:34,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:34,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/7244f830ee1c44beb950964072c9bc83 2024-11-25T17:08:34,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ab3dabf5b5f24a7c937bb68f1dd0fc70 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ab3dabf5b5f24a7c937bb68f1dd0fc70 2024-11-25T17:08:34,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ab3dabf5b5f24a7c937bb68f1dd0fc70, entries=250, sequenceid=82, filesize=47.0 K 2024-11-25T17:08:34,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/d8ac8ae1798b46fea63c34cf888d1f71 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d8ac8ae1798b46fea63c34cf888d1f71 2024-11-25T17:08:34,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,601 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d8ac8ae1798b46fea63c34cf888d1f71, entries=150, sequenceid=82, filesize=11.7 K 2024-11-25T17:08:34,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/7244f830ee1c44beb950964072c9bc83 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/7244f830ee1c44beb950964072c9bc83 2024-11-25T17:08:34,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/7244f830ee1c44beb950964072c9bc83, entries=150, sequenceid=82, filesize=11.7 K 2024-11-25T17:08:34,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c60040e46a02e10beaa963566dc1e39f in 1383ms, sequenceid=82, compaction requested=false 2024-11-25T17:08:34,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:34,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:34,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:34,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:34,637 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:34,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-25T17:08:34,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:34,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,638 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:08:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:34,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:34,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112595a5405a32a644c99d99ceac98108159_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554513250/Put/seqid=0 2024-11-25T17:08:34,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741975_1151 (size=9714) 2024-11-25T17:08:34,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,681 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112595a5405a32a644c99d99ceac98108159_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112595a5405a32a644c99d99ceac98108159_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:34,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/702966b55cd94fe484e5dede495b0e8d, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:34,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/702966b55cd94fe484e5dede495b0e8d is 175, key is test_row_0/A:col10/1732554513250/Put/seqid=0 2024-11-25T17:08:34,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741976_1152 (size=22361) 2024-11-25T17:08:34,696 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/702966b55cd94fe484e5dede495b0e8d 2024-11-25T17:08:34,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 
{event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/5a0143e002be4c87921260e044c85e96 is 50, key is test_row_0/B:col10/1732554513250/Put/seqid=0 2024-11-25T17:08:34,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741977_1153 (size=9657) 2024-11-25T17:08:34,716 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/5a0143e002be4c87921260e044c85e96 2024-11-25T17:08:34,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/17b4c2dd7ee146e395231fb1b5765d16 is 50, key is test_row_0/C:col10/1732554513250/Put/seqid=0 2024-11-25T17:08:34,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741978_1154 (size=9657)
2024-11-25T17:08:34,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,759 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/17b4c2dd7ee146e395231fb1b5765d16
2024-11-25T17:08:34,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/702966b55cd94fe484e5dede495b0e8d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/702966b55cd94fe484e5dede495b0e8d
2024-11-25T17:08:34,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,772 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/702966b55cd94fe484e5dede495b0e8d, entries=100, sequenceid=94, filesize=21.8 K
2024-11-25T17:08:34,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/5a0143e002be4c87921260e044c85e96 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5a0143e002be4c87921260e044c85e96
2024-11-25T17:08:34,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,784 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5a0143e002be4c87921260e044c85e96, entries=100, sequenceid=94, filesize=9.4 K
2024-11-25T17:08:34,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/17b4c2dd7ee146e395231fb1b5765d16 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/17b4c2dd7ee146e395231fb1b5765d16
2024-11-25T17:08:34,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,790 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/17b4c2dd7ee146e395231fb1b5765d16, entries=100, sequenceid=94, filesize=9.4 K
2024-11-25T17:08:34,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,792 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for c60040e46a02e10beaa963566dc1e39f in 154ms, sequenceid=94, compaction requested=true
2024-11-25T17:08:34,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f:
2024-11-25T17:08:34,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.
2024-11-25T17:08:34,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50
2024-11-25T17:08:34,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=50
2024-11-25T17:08:34,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-11-25T17:08:34,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 919 msec
2024-11-25T17:08:34,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 925 msec
2024-11-25T17:08:34,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:34,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:08:34,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-11-25T17:08:34,978 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed
2024-11-25T17:08:34,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-25T17:08:34,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees
2024-11-25T17:08:34,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-11-25T17:08:34,986 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-25T17:08:34,987 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-25T17:08:34,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-25T17:08:34,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-25T17:08:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:35,139 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:35,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52
2024-11-25T17:08:35,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.
2024-11-25T17:08:35,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f:
2024-11-25T17:08:35,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.
2024-11-25T17:08:35,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52
2024-11-25T17:08:35,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=52
2024-11-25T17:08:35,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51
2024-11-25T17:08:35,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 155 msec
2024-11-25T17:08:35,148 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 167 msec
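The pid=51/52 entries above record a table flush running as master procedures: a FlushTableProcedure that fans out a FlushRegionProcedure to the region server and then completes. For context, a minimal client-side sketch of how such a flush is typically requested, assuming a standard HBase client classpath and the table name taken from the log, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: request a flush of the TestAcidGuarantees table. The master runs it as a
// procedure (pid) with per-region subprocedures and reports completion, as logged above.
public final class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the master to flush all regions of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}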
2024-11-25T17:08:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): 
2024-11-25T17:08:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-11-25T17:08:35,289 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed
2024-11-25T17:08:35,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-25T17:08:35,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees
2024-11-25T17:08:35,292 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-25T17:08:35,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53
2024-11-25T17:08:35,293 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-25T17:08:35,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-25T17:08:35,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-25T17:08:35,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:35,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:35,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:35,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:35,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:08:35,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
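The StoreFileTrackerFactory entries above are emitted each time HBase resolves the store file tracker for a store; since this test run does not override the tracker, the factory falls back to DefaultStoreFileTracker. As a rough illustration only (assuming the standard HBase 2.5+ client API and the "hbase.store.file-tracker.impl" table property; neither the table name nor this code comes from the log), a table can pin a specific tracker implementation like this:

    // Minimal sketch, not from this test run. Assumes the HBase 2.5+ client API and the
    // "hbase.store.file-tracker.impl" property consulted by StoreFileTrackerFactory;
    // when it is unset, the factory logs the DefaultStoreFileTracker lines seen above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("ExampleTable"))            // hypothetical table name
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
              .setValue("hbase.store.file-tracker.impl", "FILE")        // pin the FILE tracker instead of DEFAULT
              .build();
          admin.createTable(desc);
        }
      }
    }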
[... repeated StoreFileTrackerFactory(122) DEBUG entries (DefaultStoreFileTracker), RpcServer handlers 0-2, 2024-11-25T17:08:35,442 - 2024-11-25T17:08:35,446 ...]
2024-11-25T17:08:35,445 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:35,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54
2024-11-25T17:08:35,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.
2024-11-25T17:08:35,446 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB
2024-11-25T17:08:35,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A
2024-11-25T17:08:35,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:35,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B
2024-11-25T17:08:35,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-25T17:08:35,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C
2024-11-25T17:08:35,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
[... repeated StoreFileTrackerFactory(122) DEBUG entries (DefaultStoreFileTracker), RpcServer handlers 0-2, 2024-11-25T17:08:35,446 - 2024-11-25T17:08:35,460 ...]
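The pid=54 entries above record the master-driven flush procedure reaching region c60040e46a02e10beaa963566dc1e39f and flushing its A, B, and C column families out of the CompactingMemStore. For orientation, the same kind of flush can be requested through the public Admin API; the snippet below is a minimal sketch against the standard HBase client (only the table name is taken from the log), not code from the test itself:

    // Minimal sketch using the standard HBase client Admin API; illustrative only.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; on the region server this shows up
          // as the "Flushing ... 3/3 column families" and "FLUSHING TO DISK" entries seen above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }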
[... repeated StoreFileTrackerFactory(122) DEBUG entries (DefaultStoreFileTracker), 2024-11-25T17:08:35,464 - 2024-11-25T17:08:35,465 ...]
2024-11-25T17:08:35,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing
2024-11-25T17:08:35,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f
2024-11-25T17:08:35,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125acea707c09e04a799453ed21eb00a1c0_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554515429/Put/seqid=0
[... repeated StoreFileTrackerFactory(122) DEBUG entries (DefaultStoreFileTracker), 2024-11-25T17:08:35,466 - 2024-11-25T17:08:35,517 ...]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33067 is added to blk_1073741979_1155 (size=14594) 2024-11-25T17:08:35,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554575590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554575590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554575590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-25T17:08:35,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554575592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554575597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554575697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554575697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554575698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554575701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554575706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-25T17:08:35,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554575899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554575900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554575902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554575908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:35,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554575907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:35,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:35,985 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125acea707c09e04a799453ed21eb00a1c0_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125acea707c09e04a799453ed21eb00a1c0_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:35,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/11420c7d3db64fae85ef164422ed776b, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:35,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/11420c7d3db64fae85ef164422ed776b is 175, key is test_row_0/A:col10/1732554515429/Put/seqid=0 2024-11-25T17:08:35,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741980_1156 (size=39549) 2024-11-25T17:08:36,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554576202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554576204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554576207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554576211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554576213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,394 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=102, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/11420c7d3db64fae85ef164422ed776b 2024-11-25T17:08:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-25T17:08:36,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/168771e9ad1e4b2cac578f63ea240bfb is 50, key is test_row_0/B:col10/1732554515429/Put/seqid=0 2024-11-25T17:08:36,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741981_1157 (size=12001) 2024-11-25T17:08:36,433 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/168771e9ad1e4b2cac578f63ea240bfb 2024-11-25T17:08:36,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/ed4d0a2fe0f74e4280ef61d1b79a1686 is 50, key is test_row_0/C:col10/1732554515429/Put/seqid=0 2024-11-25T17:08:36,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741982_1158 (size=12001) 2024-11-25T17:08:36,463 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=102 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/ed4d0a2fe0f74e4280ef61d1b79a1686 2024-11-25T17:08:36,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/11420c7d3db64fae85ef164422ed776b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/11420c7d3db64fae85ef164422ed776b 2024-11-25T17:08:36,477 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/11420c7d3db64fae85ef164422ed776b, entries=200, sequenceid=102, filesize=38.6 K 2024-11-25T17:08:36,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/168771e9ad1e4b2cac578f63ea240bfb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/168771e9ad1e4b2cac578f63ea240bfb 2024-11-25T17:08:36,488 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/168771e9ad1e4b2cac578f63ea240bfb, entries=150, sequenceid=102, filesize=11.7 K 2024-11-25T17:08:36,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/ed4d0a2fe0f74e4280ef61d1b79a1686 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/ed4d0a2fe0f74e4280ef61d1b79a1686 2024-11-25T17:08:36,499 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/ed4d0a2fe0f74e4280ef61d1b79a1686, entries=150, sequenceid=102, filesize=11.7 K 2024-11-25T17:08:36,500 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for c60040e46a02e10beaa963566dc1e39f in 1054ms, sequenceid=102, compaction requested=true 2024-11-25T17:08:36,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 
2024-11-25T17:08:36,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:36,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-25T17:08:36,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-25T17:08:36,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-25T17:08:36,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2100 sec 2024-11-25T17:08:36,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.2160 sec 2024-11-25T17:08:36,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:36,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-25T17:08:36,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:36,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:36,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:36,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:36,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:36,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:36,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554576714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554576715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554576716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554576717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554576717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125708dbb64666a4d69988077b908882380_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554516709/Put/seqid=0 2024-11-25T17:08:36,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741983_1159 (size=17234) 2024-11-25T17:08:36,741 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:36,746 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125708dbb64666a4d69988077b908882380_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125708dbb64666a4d69988077b908882380_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:36,747 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c9cb5dc4717840c08e0f52b4242ae211, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:36,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c9cb5dc4717840c08e0f52b4242ae211 is 175, key is test_row_0/A:col10/1732554516709/Put/seqid=0 2024-11-25T17:08:36,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741984_1160 (size=48339) 2024-11-25T17:08:36,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554576817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554576818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:36,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554576819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554577019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554577020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554577022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,152 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c9cb5dc4717840c08e0f52b4242ae211 2024-11-25T17:08:37,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/811f2ce2a3584562938bb3467a87180c is 50, key is test_row_0/B:col10/1732554516709/Put/seqid=0 2024-11-25T17:08:37,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741985_1161 (size=12101) 2024-11-25T17:08:37,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/811f2ce2a3584562938bb3467a87180c 2024-11-25T17:08:37,208 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/423dd693721a4b8fbaf49dbe7da2d85f is 50, key is test_row_0/C:col10/1732554516709/Put/seqid=0 2024-11-25T17:08:37,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741986_1162 (size=12101) 2024-11-25T17:08:37,220 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/423dd693721a4b8fbaf49dbe7da2d85f 2024-11-25T17:08:37,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c9cb5dc4717840c08e0f52b4242ae211 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c9cb5dc4717840c08e0f52b4242ae211 2024-11-25T17:08:37,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c9cb5dc4717840c08e0f52b4242ae211, entries=250, sequenceid=133, filesize=47.2 K 2024-11-25T17:08:37,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/811f2ce2a3584562938bb3467a87180c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/811f2ce2a3584562938bb3467a87180c 2024-11-25T17:08:37,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/811f2ce2a3584562938bb3467a87180c, entries=150, sequenceid=133, filesize=11.8 K 2024-11-25T17:08:37,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/423dd693721a4b8fbaf49dbe7da2d85f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/423dd693721a4b8fbaf49dbe7da2d85f 2024-11-25T17:08:37,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/423dd693721a4b8fbaf49dbe7da2d85f, entries=150, sequenceid=133, filesize=11.8 K 2024-11-25T17:08:37,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=20.13 KB/20610 for c60040e46a02e10beaa963566dc1e39f in 536ms, sequenceid=133, compaction requested=true 2024-11-25T17:08:37,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:37,246 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-25T17:08:37,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:A, priority=-2147483648, current under compaction store size is 1 
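The RegionTooBusyException records in this stretch come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking size ("Over memstore limit=512.0 K" here, a deliberately small limit chosen by this test to provoke the condition). In a normal deployment that blocking size is, to my understanding, derived from the per-region flush size multiplied by a block multiplier; the sketch below only names the relevant server-side keys (normally set in hbase-site.xml on the region servers) with example values, and the class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class MemStoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore flush threshold (example value: 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Updates are rejected with RegionTooBusyException once the memstore reaches
            // roughly flush.size * block.multiplier (example value: 4).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("approximate blocking size = "
                + conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1) + " bytes");
        }
    }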
2024-11-25T17:08:37,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:37,246 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-25T17:08:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:37,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:37,249 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 189446 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-25T17:08:37,249 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/A is initiating minor compaction (all files) 2024-11-25T17:08:37,249 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/A in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:37,249 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2da7bfca8e3343d99872cf7a0f474ce9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ab3dabf5b5f24a7c937bb68f1dd0fc70, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/702966b55cd94fe484e5dede495b0e8d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/11420c7d3db64fae85ef164422ed776b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c9cb5dc4717840c08e0f52b4242ae211] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=185.0 K 2024-11-25T17:08:37,249 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:37,249 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2da7bfca8e3343d99872cf7a0f474ce9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ab3dabf5b5f24a7c937bb68f1dd0fc70, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/702966b55cd94fe484e5dede495b0e8d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/11420c7d3db64fae85ef164422ed776b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c9cb5dc4717840c08e0f52b4242ae211] 2024-11-25T17:08:37,250 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57864 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-25T17:08:37,250 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/B is initiating minor compaction (all files) 2024-11-25T17:08:37,250 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/B in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
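The compaction records above show the exploring compaction policy selecting all 5 eligible store files for minor compactions of families A and B once enough flushes had accumulated ("16 blocking" being the store-file limit reported by the policy). The sketch below names the server-side selection settings involved (example values only; in practice these belong in hbase-site.xml on the region servers) and shows how a client can additionally request a major compaction through the Admin API; the class name is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class CompactionTuningSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Region-server-side selection thresholds; set here only to name the keys (example values).
            conf.setInt("hbase.hstore.compaction.min", 3);      // minimum files before a minor compaction
            conf.setInt("hbase.hstore.compaction.max", 10);     // maximum files in one compaction
            conf.setInt("hbase.hstore.blockingStoreFiles", 16); // store-file count at which writes block

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // A client can also ask for a major compaction of the table explicitly.
                admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }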
2024-11-25T17:08:37,250 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/7bee49e7cd294be2907d6ccc193be33d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d8ac8ae1798b46fea63c34cf888d1f71, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5a0143e002be4c87921260e044c85e96, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/168771e9ad1e4b2cac578f63ea240bfb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/811f2ce2a3584562938bb3467a87180c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=56.5 K 2024-11-25T17:08:37,250 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2da7bfca8e3343d99872cf7a0f474ce9, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732554510957 2024-11-25T17:08:37,251 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bee49e7cd294be2907d6ccc193be33d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732554510957 2024-11-25T17:08:37,251 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab3dabf5b5f24a7c937bb68f1dd0fc70, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732554512099 2024-11-25T17:08:37,251 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d8ac8ae1798b46fea63c34cf888d1f71, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732554512106 2024-11-25T17:08:37,252 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 702966b55cd94fe484e5dede495b0e8d, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732554513250 2024-11-25T17:08:37,252 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a0143e002be4c87921260e044c85e96, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732554513250 2024-11-25T17:08:37,252 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 168771e9ad1e4b2cac578f63ea240bfb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732554515429 2024-11-25T17:08:37,253 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11420c7d3db64fae85ef164422ed776b, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732554515429 2024-11-25T17:08:37,253 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 811f2ce2a3584562938bb3467a87180c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, 
compression=NONE, seqNum=133, earliestPutTs=1732554515587 2024-11-25T17:08:37,254 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9cb5dc4717840c08e0f52b4242ae211, keycount=250, bloomtype=ROW, size=47.2 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732554515587 2024-11-25T17:08:37,270 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:37,274 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411254bbefe2b3a2a408289f2b0c3dcf50a44_c60040e46a02e10beaa963566dc1e39f store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:37,274 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#B#compaction#137 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:37,275 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/5e400d05e12d49dab5d8433be143b865 is 50, key is test_row_0/B:col10/1732554516709/Put/seqid=0 2024-11-25T17:08:37,277 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411254bbefe2b3a2a408289f2b0c3dcf50a44_c60040e46a02e10beaa963566dc1e39f, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:37,278 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411254bbefe2b3a2a408289f2b0c3dcf50a44_c60040e46a02e10beaa963566dc1e39f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:37,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741987_1163 (size=12375) 2024-11-25T17:08:37,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741988_1164 (size=4469) 2024-11-25T17:08:37,322 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/5e400d05e12d49dab5d8433be143b865 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5e400d05e12d49dab5d8433be143b865 2024-11-25T17:08:37,328 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/B of c60040e46a02e10beaa963566dc1e39f into 
5e400d05e12d49dab5d8433be143b865(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:37,329 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:37,329 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/B, priority=11, startTime=1732554517246; duration=0sec 2024-11-25T17:08:37,329 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:37,329 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:B 2024-11-25T17:08:37,329 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-25T17:08:37,332 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 57864 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-25T17:08:37,332 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/C is initiating minor compaction (all files) 2024-11-25T17:08:37,332 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/C in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
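On the client side, RegionTooBusyException is ordinarily treated as a retriable condition: the HBase client backs off and retries the mutation until its retry budget is exhausted, which is why the test's writers keep making progress despite the repeated WARNs above. A minimal writer sketch with a larger retry budget follows; it assumes the standard client API, reuses the row/family/qualifier seen in this log, and the pause/retry values are examples only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BusyRegionClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Give the client more headroom when regions push back with RegionTooBusyException.
            conf.setInt("hbase.client.retries.number", 20); // example value
            conf.setLong("hbase.client.pause", 200);        // base retry pause in ms (example value)

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                table.put(put); // retried internally; throws only after the retry budget runs out
            }
        }
    }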
2024-11-25T17:08:37,333 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/5374b1c66bd04d3a8cdf8cbb2e34eb4e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/7244f830ee1c44beb950964072c9bc83, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/17b4c2dd7ee146e395231fb1b5765d16, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/ed4d0a2fe0f74e4280ef61d1b79a1686, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/423dd693721a4b8fbaf49dbe7da2d85f] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=56.5 K 2024-11-25T17:08:37,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:37,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:08:37,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:37,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:37,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:37,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:37,334 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5374b1c66bd04d3a8cdf8cbb2e34eb4e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732554510957 2024-11-25T17:08:37,335 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7244f830ee1c44beb950964072c9bc83, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1732554512106 2024-11-25T17:08:37,335 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 17b4c2dd7ee146e395231fb1b5765d16, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732554513250 2024-11-25T17:08:37,346 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ed4d0a2fe0f74e4280ef61d1b79a1686, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732554515429 2024-11-25T17:08:37,347 DEBUG 
[RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 423dd693721a4b8fbaf49dbe7da2d85f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732554515587 2024-11-25T17:08:37,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411257988bec819f643c098fca8c29fdf3aee_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554517332/Put/seqid=0 2024-11-25T17:08:37,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-25T17:08:37,400 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-25T17:08:37,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:37,404 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#C#compaction#140 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:37,405 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/65533ab0b6b74c7593f76f48d3ee8150 is 50, key is test_row_0/C:col10/1732554516709/Put/seqid=0 2024-11-25T17:08:37,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-25T17:08:37,409 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:37,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-25T17:08:37,410 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:37,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:37,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554577409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554577409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554577411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741989_1165 (size=19774) 2024-11-25T17:08:37,433 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:37,443 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411257988bec819f643c098fca8c29fdf3aee_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411257988bec819f643c098fca8c29fdf3aee_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:37,445 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/fdd5ab5e16cc47baa2e40c6186a62465, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:37,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/fdd5ab5e16cc47baa2e40c6186a62465 is 175, key is test_row_0/A:col10/1732554517332/Put/seqid=0 2024-11-25T17:08:37,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is 
added to blk_1073741990_1166 (size=12375) 2024-11-25T17:08:37,469 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/65533ab0b6b74c7593f76f48d3ee8150 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/65533ab0b6b74c7593f76f48d3ee8150 2024-11-25T17:08:37,481 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/C of c60040e46a02e10beaa963566dc1e39f into 65533ab0b6b74c7593f76f48d3ee8150(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:37,481 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:37,481 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/C, priority=11, startTime=1732554517247; duration=0sec 2024-11-25T17:08:37,481 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:37,481 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:C 2024-11-25T17:08:37,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741991_1167 (size=57033) 2024-11-25T17:08:37,491 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=146, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/fdd5ab5e16cc47baa2e40c6186a62465 2024-11-25T17:08:37,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/8ede28a7d8804096a9202b5b97a17194 is 50, key is test_row_0/B:col10/1732554517332/Put/seqid=0 2024-11-25T17:08:37,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554577512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554577513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-25T17:08:37,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554577516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741992_1168 (size=12151) 2024-11-25T17:08:37,563 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:37,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-25T17:08:37,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:37,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:37,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:37,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554577715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554577715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-25T17:08:37,718 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:37,718 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#A#compaction#138 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:37,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-25T17:08:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:37,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,719 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/859d85cafa784804abdafd1662c0811a is 175, key is test_row_0/A:col10/1732554516709/Put/seqid=0 2024-11-25T17:08:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554577722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554577722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:37,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741993_1169 (size=31329) 2024-11-25T17:08:37,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554577730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:37,875 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:37,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-25T17:08:37,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:37,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:37,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:37,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:37,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/8ede28a7d8804096a9202b5b97a17194 2024-11-25T17:08:37,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/c0b1edb9c4844d51b72089083804014a is 50, key is test_row_0/C:col10/1732554517332/Put/seqid=0 2024-11-25T17:08:37,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741994_1170 (size=12151) 2024-11-25T17:08:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-25T17:08:38,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554578017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554578020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554578025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,029 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:38,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-25T17:08:38,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:38,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:38,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:38,030 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,134 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/859d85cafa784804abdafd1662c0811a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/859d85cafa784804abdafd1662c0811a 2024-11-25T17:08:38,139 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/A of c60040e46a02e10beaa963566dc1e39f into 859d85cafa784804abdafd1662c0811a(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:38,139 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:38,140 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/A, priority=11, startTime=1732554517246; duration=0sec 2024-11-25T17:08:38,140 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:38,140 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:A 2024-11-25T17:08:38,182 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:38,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-25T17:08:38,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:38,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
as already flushing 2024-11-25T17:08:38,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:38,183 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:38,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-25T17:08:38,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:38,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:38,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:38,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:38,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/c0b1edb9c4844d51b72089083804014a 2024-11-25T17:08:38,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/fdd5ab5e16cc47baa2e40c6186a62465 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/fdd5ab5e16cc47baa2e40c6186a62465 2024-11-25T17:08:38,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/fdd5ab5e16cc47baa2e40c6186a62465, entries=300, sequenceid=146, filesize=55.7 K 2024-11-25T17:08:38,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/8ede28a7d8804096a9202b5b97a17194 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8ede28a7d8804096a9202b5b97a17194 2024-11-25T17:08:38,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8ede28a7d8804096a9202b5b97a17194, entries=150, 
sequenceid=146, filesize=11.9 K 2024-11-25T17:08:38,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/c0b1edb9c4844d51b72089083804014a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c0b1edb9c4844d51b72089083804014a 2024-11-25T17:08:38,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c0b1edb9c4844d51b72089083804014a, entries=150, sequenceid=146, filesize=11.9 K 2024-11-25T17:08:38,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c60040e46a02e10beaa963566dc1e39f in 1086ms, sequenceid=146, compaction requested=false 2024-11-25T17:08:38,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:38,489 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:38,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-25T17:08:38,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:38,490 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-25T17:08:38,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:38,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:38,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:38,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:38,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:38,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:38,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125a45d81c9e7e14f43bc4dbad852b8eac6_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554517407/Put/seqid=0 2024-11-25T17:08:38,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-25T17:08:38,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:38,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741995_1171 (size=12304) 2024-11-25T17:08:38,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:38,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554578537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554578537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554578540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,544 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125a45d81c9e7e14f43bc4dbad852b8eac6_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a45d81c9e7e14f43bc4dbad852b8eac6_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:38,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ed4e381df41b4c12b59e8a8e9f935fb1, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:38,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ed4e381df41b4c12b59e8a8e9f935fb1 is 175, key is test_row_0/A:col10/1732554517407/Put/seqid=0 2024-11-25T17:08:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741996_1172 (size=31105) 2024-11-25T17:08:38,559 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ed4e381df41b4c12b59e8a8e9f935fb1 2024-11-25T17:08:38,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/f5627db4c4b248078d2e74c7f7817647 is 50, key is test_row_0/B:col10/1732554517407/Put/seqid=0 2024-11-25T17:08:38,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741997_1173 (size=12151) 2024-11-25T17:08:38,581 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/f5627db4c4b248078d2e74c7f7817647 2024-11-25T17:08:38,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/2e2ca5abd29c48309cce873266b185e2 is 50, key is test_row_0/C:col10/1732554517407/Put/seqid=0 2024-11-25T17:08:38,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741998_1174 (size=12151) 2024-11-25T17:08:38,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554578641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554578641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554578643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554578843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554578844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:38,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554578847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,006 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/2e2ca5abd29c48309cce873266b185e2 2024-11-25T17:08:39,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ed4e381df41b4c12b59e8a8e9f935fb1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ed4e381df41b4c12b59e8a8e9f935fb1 2024-11-25T17:08:39,018 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ed4e381df41b4c12b59e8a8e9f935fb1, entries=150, sequenceid=172, filesize=30.4 K 2024-11-25T17:08:39,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/f5627db4c4b248078d2e74c7f7817647 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/f5627db4c4b248078d2e74c7f7817647 2024-11-25T17:08:39,027 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/f5627db4c4b248078d2e74c7f7817647, entries=150, sequenceid=172, filesize=11.9 K 2024-11-25T17:08:39,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/2e2ca5abd29c48309cce873266b185e2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2e2ca5abd29c48309cce873266b185e2 2024-11-25T17:08:39,034 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2e2ca5abd29c48309cce873266b185e2, entries=150, sequenceid=172, filesize=11.9 K 2024-11-25T17:08:39,036 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c60040e46a02e10beaa963566dc1e39f in 547ms, sequenceid=172, compaction requested=true 2024-11-25T17:08:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-25T17:08:39,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-25T17:08:39,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-25T17:08:39,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6270 sec 2024-11-25T17:08:39,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.6370 sec 2024-11-25T17:08:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:39,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-25T17:08:39,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:39,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:39,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:39,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:39,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:39,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:39,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112566ee7ec363484ba7a22f5b1fc730a746_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554519149/Put/seqid=0 2024-11-25T17:08:39,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741999_1175 (size=14794) 2024-11-25T17:08:39,180 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:39,185 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112566ee7ec363484ba7a22f5b1fc730a746_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112566ee7ec363484ba7a22f5b1fc730a746_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:39,187 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d73d376fafb94af895b7b5a4681c562c, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:39,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d73d376fafb94af895b7b5a4681c562c is 175, key is test_row_0/A:col10/1732554519149/Put/seqid=0 2024-11-25T17:08:39,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742000_1176 (size=39749) 2024-11-25T17:08:39,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554579239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554579239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554579240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554579346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554579347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554579346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-25T17:08:39,519 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-25T17:08:39,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:39,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-25T17:08:39,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-25T17:08:39,530 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:39,530 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:39,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:39,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554579549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554579550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554579550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,599 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=186, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d73d376fafb94af895b7b5a4681c562c 2024-11-25T17:08:39,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/af3ae5f1024041e8875d7e489653db61 is 50, key is test_row_0/B:col10/1732554519149/Put/seqid=0 2024-11-25T17:08:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-25T17:08:39,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742001_1177 (size=12151) 2024-11-25T17:08:39,682 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:39,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-25T17:08:39,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:39,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
as already flushing 2024-11-25T17:08:39,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:39,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:39,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:39,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:39,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554579730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,732 DEBUG [Thread-642 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:08:39,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554579742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,745 DEBUG [Thread-644 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:08:39,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-25T17:08:39,838 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:39,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-25T17:08:39,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:39,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:39,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:39,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:39,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:39,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:39,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554579855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554579856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:39,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554579857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:39,992 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:39,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-25T17:08:39,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:39,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:39,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:39,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:39,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/af3ae5f1024041e8875d7e489653db61 2024-11-25T17:08:40,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/865391811da94e41ac3f7e66f5380009 is 50, key is test_row_0/C:col10/1732554519149/Put/seqid=0 2024-11-25T17:08:40,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742002_1178 (size=12151) 2024-11-25T17:08:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-25T17:08:40,146 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:40,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-25T17:08:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,202 INFO [master/6579369734b6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T17:08:40,202 INFO [master/6579369734b6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T17:08:40,299 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:40,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-25T17:08:40,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:40,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554580362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:40,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:40,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554580362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:40,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:40,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554580365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:40,456 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:40,457 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-25T17:08:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:40,462 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=186 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/865391811da94e41ac3f7e66f5380009 2024-11-25T17:08:40,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d73d376fafb94af895b7b5a4681c562c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d73d376fafb94af895b7b5a4681c562c 2024-11-25T17:08:40,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d73d376fafb94af895b7b5a4681c562c, entries=200, sequenceid=186, filesize=38.8 K 2024-11-25T17:08:40,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/af3ae5f1024041e8875d7e489653db61 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/af3ae5f1024041e8875d7e489653db61 2024-11-25T17:08:40,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/af3ae5f1024041e8875d7e489653db61, entries=150, sequenceid=186, filesize=11.9 K 2024-11-25T17:08:40,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/865391811da94e41ac3f7e66f5380009 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/865391811da94e41ac3f7e66f5380009 2024-11-25T17:08:40,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/865391811da94e41ac3f7e66f5380009, entries=150, sequenceid=186, filesize=11.9 K 2024-11-25T17:08:40,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for c60040e46a02e10beaa963566dc1e39f in 1338ms, sequenceid=186, compaction requested=true 2024-11-25T17:08:40,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:40,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:40,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:40,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:40,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:40,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:40,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:40,489 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:40,489 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:40,490 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 159216 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:40,490 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:40,490 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/A is initiating minor compaction (all files) 2024-11-25T17:08:40,490 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/B is initiating minor compaction (all files) 2024-11-25T17:08:40,490 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/A in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,490 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/B in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
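The RegionTooBusyException entries above are back-pressure: HRegion.checkResources rejects the mutation before it reaches the memstore, and the caller is expected to back off until the in-flight flush drains the region. A minimal client-side sketch of that pattern, assuming the stock HBase client API; the table, family, and row names mirror the test, but the explicit retry loop and backoff values are illustrative assumptions (the stock client normally retries such exceptions itself, governed by hbase.client.retries.number and hbase.client.pause):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                   // rejected with RegionTooBusyException while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);          // give the region time to flush
          backoffMs *= 2;                   // exponential backoff, bounded by the attempt count above
        }
      }
    }
  }
}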
2024-11-25T17:08:40,490 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/859d85cafa784804abdafd1662c0811a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/fdd5ab5e16cc47baa2e40c6186a62465, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ed4e381df41b4c12b59e8a8e9f935fb1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d73d376fafb94af895b7b5a4681c562c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=155.5 K 2024-11-25T17:08:40,490 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5e400d05e12d49dab5d8433be143b865, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8ede28a7d8804096a9202b5b97a17194, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/f5627db4c4b248078d2e74c7f7817647, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/af3ae5f1024041e8875d7e489653db61] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=47.7 K 2024-11-25T17:08:40,490 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,490 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/859d85cafa784804abdafd1662c0811a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/fdd5ab5e16cc47baa2e40c6186a62465, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ed4e381df41b4c12b59e8a8e9f935fb1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d73d376fafb94af895b7b5a4681c562c] 2024-11-25T17:08:40,491 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 859d85cafa784804abdafd1662c0811a, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732554515587 2024-11-25T17:08:40,491 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e400d05e12d49dab5d8433be143b865, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732554515587 2024-11-25T17:08:40,491 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdd5ab5e16cc47baa2e40c6186a62465, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732554516715 2024-11-25T17:08:40,491 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ede28a7d8804096a9202b5b97a17194, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732554517329 2024-11-25T17:08:40,492 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f5627db4c4b248078d2e74c7f7817647, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554517406 2024-11-25T17:08:40,492 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed4e381df41b4c12b59e8a8e9f935fb1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554517406 2024-11-25T17:08:40,492 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting af3ae5f1024041e8875d7e489653db61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732554518537 2024-11-25T17:08:40,492 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting d73d376fafb94af895b7b5a4681c562c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732554518531 2024-11-25T17:08:40,504 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#B#compaction#149 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:40,505 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/849762f7f264476bb32e61b0ccb2c859 is 50, key is test_row_0/B:col10/1732554519149/Put/seqid=0 2024-11-25T17:08:40,530 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:40,550 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125cedd3ca5182d4570809b8d8f1e95b249_c60040e46a02e10beaa963566dc1e39f store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:40,553 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125cedd3ca5182d4570809b8d8f1e95b249_c60040e46a02e10beaa963566dc1e39f, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:40,553 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125cedd3ca5182d4570809b8d8f1e95b249_c60040e46a02e10beaa963566dc1e39f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:40,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742003_1179 (size=12561) 2024-11-25T17:08:40,566 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/849762f7f264476bb32e61b0ccb2c859 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/849762f7f264476bb32e61b0ccb2c859 2024-11-25T17:08:40,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742004_1180 (size=4469) 2024-11-25T17:08:40,575 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#A#compaction#150 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:40,576 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ff09492d99ed4747b19237dfb5bbc7b9 is 175, key is test_row_0/A:col10/1732554519149/Put/seqid=0 2024-11-25T17:08:40,579 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/B of c60040e46a02e10beaa963566dc1e39f into 849762f7f264476bb32e61b0ccb2c859(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:40,579 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:40,579 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/B, priority=12, startTime=1732554520488; duration=0sec 2024-11-25T17:08:40,580 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:40,580 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:B 2024-11-25T17:08:40,580 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:40,581 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:40,582 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/C is initiating minor compaction (all files) 2024-11-25T17:08:40,582 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/C in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
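The "considering 3 permutations with 3 in ratio" lines come from ExploringCompactionPolicy, which walks contiguous windows of the eligible store files and keeps a window only if every file in it is no larger than the combined size of the other files multiplied by the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default), then prefers the window that compacts the most files for the least total size. A simplified sketch of just the ratio test, with illustrative per-file sizes summing to the 48828 bytes selected above; the real policy additionally enforces hbase.hstore.compaction.min/max and off-peak ratios:

import java.util.List;

final class RatioCheck {
  // Every file in the candidate window must be <= (sum of the other files) * ratio.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;   // one file dominates the window; this permutation is skipped
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes (bytes) for four small flush files, summing to 48828 as reported above.
    List<Long> window = List.of(12_391L, 12_183L, 12_183L, 12_071L);
    System.out.println(filesInRatio(window, 1.2));   // true -> the window is a valid candidate
  }
}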
2024-11-25T17:08:40,582 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/65533ab0b6b74c7593f76f48d3ee8150, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c0b1edb9c4844d51b72089083804014a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2e2ca5abd29c48309cce873266b185e2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/865391811da94e41ac3f7e66f5380009] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=47.7 K 2024-11-25T17:08:40,582 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 65533ab0b6b74c7593f76f48d3ee8150, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732554515587 2024-11-25T17:08:40,583 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c0b1edb9c4844d51b72089083804014a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732554517329 2024-11-25T17:08:40,583 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e2ca5abd29c48309cce873266b185e2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732554517406 2024-11-25T17:08:40,584 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 865391811da94e41ac3f7e66f5380009, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732554518537 2024-11-25T17:08:40,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742005_1181 (size=31515) 2024-11-25T17:08:40,602 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#C#compaction#151 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:40,602 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/2d21014f9df24495a1babee5f0743875 is 50, key is test_row_0/C:col10/1732554519149/Put/seqid=0 2024-11-25T17:08:40,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:40,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-25T17:08:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:40,618 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-25T17:08:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:40,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:40,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:40,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:40,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:40,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742006_1182 (size=12561) 2024-11-25T17:08:40,626 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/2d21014f9df24495a1babee5f0743875 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2d21014f9df24495a1babee5f0743875 2024-11-25T17:08:40,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112519b39cc3117946708e2a8dfffcb37a1f_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554519166/Put/seqid=0 2024-11-25T17:08:40,635 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/C of c60040e46a02e10beaa963566dc1e39f into 2d21014f9df24495a1babee5f0743875(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:40,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-25T17:08:40,635 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:40,635 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/C, priority=12, startTime=1732554520488; duration=0sec 2024-11-25T17:08:40,636 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:40,636 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:C 2024-11-25T17:08:40,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742007_1183 (size=12304) 2024-11-25T17:08:40,992 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/ff09492d99ed4747b19237dfb5bbc7b9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ff09492d99ed4747b19237dfb5bbc7b9 2024-11-25T17:08:40,999 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/A of c60040e46a02e10beaa963566dc1e39f into ff09492d99ed4747b19237dfb5bbc7b9(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
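The 512.0 K figure quoted in the recurring RegionTooBusyException messages is the region's blocking memstore size, which HRegion.checkResources derives from the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier (default 4). The flush size this test actually configures is not visible in the log; the 128 KB value below is only an assumption chosen to reproduce the reported limit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // assumed test-sized value, not from the log
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = flushSize * multiplier;                      // 128 KB * 4 = 512 KB
    System.out.println("puts are rejected once the region's memstore exceeds " + blockingSize + " bytes");
  }
}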
2024-11-25T17:08:40,999 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:40,999 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/A, priority=12, startTime=1732554520488; duration=0sec 2024-11-25T17:08:40,999 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:40,999 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:A 2024-11-25T17:08:41,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:41,050 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112519b39cc3117946708e2a8dfffcb37a1f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112519b39cc3117946708e2a8dfffcb37a1f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a12bbe578d84cb1b6ddd33c03edf71e, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a12bbe578d84cb1b6ddd33c03edf71e is 175, key is test_row_0/A:col10/1732554519166/Put/seqid=0 2024-11-25T17:08:41,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742008_1184 (size=31105) 2024-11-25T17:08:41,056 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a12bbe578d84cb1b6ddd33c03edf71e 2024-11-25T17:08:41,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/d82443a94bab497cae7c656c3096daf3 is 50, key is test_row_0/B:col10/1732554519166/Put/seqid=0 2024-11-25T17:08:41,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742009_1185 (size=12151) 2024-11-25T17:08:41,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:41,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:41,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554581387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554581387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554581387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554581492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554581492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554581493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,495 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/d82443a94bab497cae7c656c3096daf3 2024-11-25T17:08:41,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/cd7571cf7c31424ba1db4bd2b96dfd09 is 50, key is test_row_0/C:col10/1732554519166/Put/seqid=0 2024-11-25T17:08:41,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742010_1186 (size=12151) 2024-11-25T17:08:41,589 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/cd7571cf7c31424ba1db4bd2b96dfd09 2024-11-25T17:08:41,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/4a12bbe578d84cb1b6ddd33c03edf71e as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a12bbe578d84cb1b6ddd33c03edf71e 2024-11-25T17:08:41,616 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a12bbe578d84cb1b6ddd33c03edf71e, entries=150, sequenceid=211, filesize=30.4 K 2024-11-25T17:08:41,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/d82443a94bab497cae7c656c3096daf3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d82443a94bab497cae7c656c3096daf3 2024-11-25T17:08:41,622 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d82443a94bab497cae7c656c3096daf3, entries=150, sequenceid=211, filesize=11.9 K 2024-11-25T17:08:41,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/cd7571cf7c31424ba1db4bd2b96dfd09 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/cd7571cf7c31424ba1db4bd2b96dfd09 2024-11-25T17:08:41,631 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/cd7571cf7c31424ba1db4bd2b96dfd09, entries=150, sequenceid=211, filesize=11.9 K 2024-11-25T17:08:41,634 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c60040e46a02e10beaa963566dc1e39f in 1016ms, sequenceid=211, compaction requested=false 2024-11-25T17:08:41,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:41,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
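The pid=58 flush that finally succeeds here was dispatched by the master as a FlushRegionProcedure under the table-level FlushTableProcedure (pid=57) seen finishing just below. From client code, the same table flush can be requested through the Admin API; a minimal sketch, assuming a reachable cluster configuration on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; with procedure-based flush
      // this is what schedules the FlushTableProcedure/FlushRegionCallable work seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}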
2024-11-25T17:08:41,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-25T17:08:41,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-25T17:08:41,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-25T17:08:41,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-25T17:08:41,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1060 sec 2024-11-25T17:08:41,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.1190 sec 2024-11-25T17:08:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:41,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-25T17:08:41,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:41,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:41,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:41,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:41,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:41,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:41,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112530f43586dbe148f0a446ec988187ce4f_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554521696/Put/seqid=0 2024-11-25T17:08:41,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554581741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554581742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554581743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742011_1187 (size=12304) 2024-11-25T17:08:41,754 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:41,761 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112530f43586dbe148f0a446ec988187ce4f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112530f43586dbe148f0a446ec988187ce4f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:41,763 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/db1677887fd24d38b11bb76d2bb8c814, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:41,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/db1677887fd24d38b11bb76d2bb8c814 is 175, key is test_row_0/A:col10/1732554521696/Put/seqid=0 2024-11-25T17:08:41,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742012_1188 (size=31105) 2024-11-25T17:08:41,809 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=227, memsize=24.6 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/db1677887fd24d38b11bb76d2bb8c814 2024-11-25T17:08:41,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/91dbb05a17fa44a582a23dea912b9af0 is 50, key is test_row_0/B:col10/1732554521696/Put/seqid=0 2024-11-25T17:08:41,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554581848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554581849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:41,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554581849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:41,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742013_1189 (size=12151) 2024-11-25T17:08:42,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554582050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554582051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554582057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/91dbb05a17fa44a582a23dea912b9af0 2024-11-25T17:08:42,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/277af2f02de940f298589c223b288848 is 50, key is test_row_0/C:col10/1732554521696/Put/seqid=0 2024-11-25T17:08:42,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742014_1190 (size=12151) 2024-11-25T17:08:42,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554582357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554582361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554582367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/277af2f02de940f298589c223b288848 2024-11-25T17:08:42,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/db1677887fd24d38b11bb76d2bb8c814 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/db1677887fd24d38b11bb76d2bb8c814 2024-11-25T17:08:42,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/db1677887fd24d38b11bb76d2bb8c814, entries=150, sequenceid=227, filesize=30.4 K 2024-11-25T17:08:42,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/91dbb05a17fa44a582a23dea912b9af0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/91dbb05a17fa44a582a23dea912b9af0 2024-11-25T17:08:42,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/91dbb05a17fa44a582a23dea912b9af0, entries=150, sequenceid=227, filesize=11.9 K 2024-11-25T17:08:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/277af2f02de940f298589c223b288848 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/277af2f02de940f298589c223b288848 2024-11-25T17:08:42,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/277af2f02de940f298589c223b288848, entries=150, sequenceid=227, filesize=11.9 K 2024-11-25T17:08:42,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c60040e46a02e10beaa963566dc1e39f in 1022ms, sequenceid=227, compaction requested=true 2024-11-25T17:08:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:42,719 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:42,719 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:42,721 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:42,721 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/A is initiating minor compaction (all files) 2024-11-25T17:08:42,721 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/A in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:42,721 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ff09492d99ed4747b19237dfb5bbc7b9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a12bbe578d84cb1b6ddd33c03edf71e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/db1677887fd24d38b11bb76d2bb8c814] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=91.5 K 2024-11-25T17:08:42,721 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:42,721 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ff09492d99ed4747b19237dfb5bbc7b9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a12bbe578d84cb1b6ddd33c03edf71e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/db1677887fd24d38b11bb76d2bb8c814] 2024-11-25T17:08:42,721 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:42,722 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/B is initiating minor compaction (all files) 2024-11-25T17:08:42,722 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/B in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:42,722 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/849762f7f264476bb32e61b0ccb2c859, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d82443a94bab497cae7c656c3096daf3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/91dbb05a17fa44a582a23dea912b9af0] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=36.0 K 2024-11-25T17:08:42,722 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff09492d99ed4747b19237dfb5bbc7b9, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732554518537 2024-11-25T17:08:42,722 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 849762f7f264476bb32e61b0ccb2c859, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732554518537 2024-11-25T17:08:42,723 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a12bbe578d84cb1b6ddd33c03edf71e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732554519166 2024-11-25T17:08:42,723 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d82443a94bab497cae7c656c3096daf3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732554519166 2024-11-25T17:08:42,723 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting db1677887fd24d38b11bb76d2bb8c814, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732554521375 2024-11-25T17:08:42,723 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 91dbb05a17fa44a582a23dea912b9af0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732554521375 2024-11-25T17:08:42,734 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#B#compaction#158 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:42,735 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/8f268081013341b8abb81cadb9c8cd57 is 50, key is test_row_0/B:col10/1732554521696/Put/seqid=0 2024-11-25T17:08:42,737 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:42,742 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411257ceb5d1be2d34d5a87d87b113195b00d_c60040e46a02e10beaa963566dc1e39f store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:42,744 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411257ceb5d1be2d34d5a87d87b113195b00d_c60040e46a02e10beaa963566dc1e39f, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:42,744 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411257ceb5d1be2d34d5a87d87b113195b00d_c60040e46a02e10beaa963566dc1e39f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:42,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742015_1191 (size=12663) 2024-11-25T17:08:42,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742016_1192 (size=4469) 2024-11-25T17:08:42,764 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/8f268081013341b8abb81cadb9c8cd57 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8f268081013341b8abb81cadb9c8cd57 2024-11-25T17:08:42,765 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#A#compaction#159 average throughput is 0.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:42,766 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/241441a00a3c4d64be039c9d245e70ff is 175, key is test_row_0/A:col10/1732554521696/Put/seqid=0 2024-11-25T17:08:42,771 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/B of c60040e46a02e10beaa963566dc1e39f into 8f268081013341b8abb81cadb9c8cd57(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:42,771 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:42,771 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/B, priority=13, startTime=1732554522719; duration=0sec 2024-11-25T17:08:42,771 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:42,771 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:B 2024-11-25T17:08:42,771 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:42,773 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:42,773 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/C is initiating minor compaction (all files) 2024-11-25T17:08:42,773 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/C in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:42,773 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2d21014f9df24495a1babee5f0743875, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/cd7571cf7c31424ba1db4bd2b96dfd09, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/277af2f02de940f298589c223b288848] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=36.0 K 2024-11-25T17:08:42,774 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d21014f9df24495a1babee5f0743875, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=186, earliestPutTs=1732554518537 2024-11-25T17:08:42,774 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cd7571cf7c31424ba1db4bd2b96dfd09, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732554519166 2024-11-25T17:08:42,775 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 277af2f02de940f298589c223b288848, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732554521375 2024-11-25T17:08:42,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742017_1193 (size=31617) 2024-11-25T17:08:42,786 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#C#compaction#160 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:42,786 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/241441a00a3c4d64be039c9d245e70ff as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/241441a00a3c4d64be039c9d245e70ff 2024-11-25T17:08:42,786 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/2f71f389cb9d40abb32c9202b398868b is 50, key is test_row_0/C:col10/1732554521696/Put/seqid=0 2024-11-25T17:08:42,794 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/A of c60040e46a02e10beaa963566dc1e39f into 241441a00a3c4d64be039c9d245e70ff(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:42,794 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:42,794 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/A, priority=13, startTime=1732554522719; duration=0sec 2024-11-25T17:08:42,794 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:42,794 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:A 2024-11-25T17:08:42,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742018_1194 (size=12663) 2024-11-25T17:08:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:42,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-25T17:08:42,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:42,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:42,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:42,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:42,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:42,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:42,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125723c34a0657a46b2a932695b562326ab_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554521739/Put/seqid=0 2024-11-25T17:08:42,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554582879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554582879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554582879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742019_1195 (size=12304) 2024-11-25T17:08:42,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554582981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554582982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:42,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:42,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554582986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554583183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554583189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554583203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,214 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/2f71f389cb9d40abb32c9202b398868b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2f71f389cb9d40abb32c9202b398868b 2024-11-25T17:08:43,220 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/C of c60040e46a02e10beaa963566dc1e39f into 2f71f389cb9d40abb32c9202b398868b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
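The repeated RegionTooBusyException entries above all report the same condition: the region's memstore has grown past its blocking limit (512.0 K in this run), so incoming Mutate calls are rejected until the in-progress flush and compaction catch up. In stock HBase that blocking limit is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the tiny 512 K figure suggests the test lowers the flush size far below the production default. The sketch below only illustrates that arithmetic with assumed values (128 KB flush size, multiplier 4), it is not taken from this run's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Illustrative values only: a test would set a small flush size to hit the
            // blocking limit quickly. 128 KB * 4 = 512 KB, matching the
            // "Over memstore limit=512.0 K" messages logged above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L);
            long multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * multiplier;
            System.out.println("Writes block once a region's memstore exceeds " + blockingLimit + " bytes");
        }
    }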
2024-11-25T17:08:43,220 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:43,220 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/C, priority=13, startTime=1732554522719; duration=0sec 2024-11-25T17:08:43,220 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:43,220 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:C 2024-11-25T17:08:43,310 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:43,316 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125723c34a0657a46b2a932695b562326ab_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125723c34a0657a46b2a932695b562326ab_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:43,317 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/255ebac4125c4c03bf3779c3f60e8e5e, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:43,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/255ebac4125c4c03bf3779c3f60e8e5e is 175, key is test_row_0/A:col10/1732554521739/Put/seqid=0 2024-11-25T17:08:43,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742020_1196 (size=31105) 2024-11-25T17:08:43,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554583489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554583494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554583506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-25T17:08:43,638 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-25T17:08:43,639 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:43,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-25T17:08:43,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-25T17:08:43,645 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:43,649 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:43,650 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:43,724 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/255ebac4125c4c03bf3779c3f60e8e5e 2024-11-25T17:08:43,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/19571f75205e42d385d3235e3af1b62e is 50, key is test_row_0/B:col10/1732554521739/Put/seqid=0 2024-11-25T17:08:43,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742021_1197 (size=12151) 2024-11-25T17:08:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-25T17:08:43,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45678 deadline: 1732554583741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,743 DEBUG [Thread-642 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:08:43,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45676 deadline: 1732554583774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:43,777 DEBUG [Thread-644 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8180 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:08:43,802 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:43,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-25T17:08:43,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:43,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:43,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:43,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
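On the client side these rejections surface through RpcRetryingCallerImpl, which keeps retrying the put with backoff (tries=7 of retries=16 in the entries above) instead of failing the AtomicityWriter thread immediately. The following is a minimal sketch of how a standalone client would configure a comparable retry budget and issue the same kind of single-row put; the pause value is an assumption, and the row, family, and qualifier are taken from the log messages above purely for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetriedPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Retry budget comparable to what RpcRetryingCallerImpl reports ("retries=16");
            // the 100 ms pause is an assumed value for this sketch.
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100L);

            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // Blocks inside the client retry loop; RegionTooBusyException responses are
                // retried until the budget is exhausted, then surface as an IOException.
                table.put(put);
            }
        }
    }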
2024-11-25T17:08:43,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:43,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:43,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-25T17:08:43,955 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:43,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-25T17:08:43,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:43,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:43,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:43,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:43,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:43,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:43,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:43,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554583995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:44,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:44,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554584001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:44,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:44,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554584009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:44,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:44,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-25T17:08:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:44,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:44,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/19571f75205e42d385d3235e3af1b62e 2024-11-25T17:08:44,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/0d3e6f7132754b04800bb75d65318d0c is 50, key is test_row_0/C:col10/1732554521739/Put/seqid=0 2024-11-25T17:08:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742022_1198 (size=12151) 2024-11-25T17:08:44,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-25T17:08:44,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:44,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-25T17:08:44,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:44,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:44,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:44,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:44,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,417 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:44,418 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-25T17:08:44,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:44,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:44,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:44,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:44,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/0d3e6f7132754b04800bb75d65318d0c 2024-11-25T17:08:44,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/255ebac4125c4c03bf3779c3f60e8e5e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/255ebac4125c4c03bf3779c3f60e8e5e 2024-11-25T17:08:44,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/255ebac4125c4c03bf3779c3f60e8e5e, entries=150, sequenceid=252, filesize=30.4 K 2024-11-25T17:08:44,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/19571f75205e42d385d3235e3af1b62e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/19571f75205e42d385d3235e3af1b62e 2024-11-25T17:08:44,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/19571f75205e42d385d3235e3af1b62e, entries=150, sequenceid=252, filesize=11.9 K 2024-11-25T17:08:44,570 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:44,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/0d3e6f7132754b04800bb75d65318d0c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/0d3e6f7132754b04800bb75d65318d0c 2024-11-25T17:08:44,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-25T17:08:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
as already flushing 2024-11-25T17:08:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:44,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:44,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/0d3e6f7132754b04800bb75d65318d0c, entries=150, sequenceid=252, filesize=11.9 K 2024-11-25T17:08:44,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for c60040e46a02e10beaa963566dc1e39f in 1719ms, sequenceid=252, compaction requested=false 2024-11-25T17:08:44,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:44,724 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:44,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-25T17:08:44,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:44,725 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-25T17:08:44,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:44,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:44,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:44,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:44,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:44,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:44,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411252dd411ced94f42bb83c9a90f0d672a47_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554522873/Put/seqid=0 2024-11-25T17:08:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-25T17:08:44,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742023_1199 (size=12454) 2024-11-25T17:08:44,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:44,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:45,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554585026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554585027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554585027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554585130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554585130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554585131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:45,172 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411252dd411ced94f42bb83c9a90f0d672a47_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411252dd411ced94f42bb83c9a90f0d672a47_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:45,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/5726fe204c5241bc9ce870702b79c01b, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:45,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/5726fe204c5241bc9ce870702b79c01b is 175, key is test_row_0/A:col10/1732554522873/Put/seqid=0 2024-11-25T17:08:45,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742024_1200 (size=31255) 2024-11-25T17:08:45,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554585333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554585334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554585334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,580 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=268, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/5726fe204c5241bc9ce870702b79c01b 2024-11-25T17:08:45,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/767788f4442c41dcbbd59924688d95cc is 50, key is test_row_0/B:col10/1732554522873/Put/seqid=0 2024-11-25T17:08:45,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742025_1201 (size=12301) 2024-11-25T17:08:45,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554585638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554585639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:45,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554585639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-25T17:08:46,009 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/767788f4442c41dcbbd59924688d95cc 2024-11-25T17:08:46,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/f948c474b445440ebb6094b24b9f7512 is 50, key is test_row_0/C:col10/1732554522873/Put/seqid=0 2024-11-25T17:08:46,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742026_1202 (size=12301) 2024-11-25T17:08:46,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554586140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:46,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554586142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:46,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:46,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554586144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:46,426 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/f948c474b445440ebb6094b24b9f7512 2024-11-25T17:08:46,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/5726fe204c5241bc9ce870702b79c01b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/5726fe204c5241bc9ce870702b79c01b 2024-11-25T17:08:46,442 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/5726fe204c5241bc9ce870702b79c01b, entries=150, sequenceid=268, filesize=30.5 K 2024-11-25T17:08:46,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/767788f4442c41dcbbd59924688d95cc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/767788f4442c41dcbbd59924688d95cc 2024-11-25T17:08:46,448 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/767788f4442c41dcbbd59924688d95cc, entries=150, sequenceid=268, filesize=12.0 K 2024-11-25T17:08:46,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/f948c474b445440ebb6094b24b9f7512 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f948c474b445440ebb6094b24b9f7512 2024-11-25T17:08:46,457 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f948c474b445440ebb6094b24b9f7512, entries=150, sequenceid=268, filesize=12.0 K 2024-11-25T17:08:46,459 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for c60040e46a02e10beaa963566dc1e39f in 1735ms, sequenceid=268, compaction requested=true 2024-11-25T17:08:46,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:46,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
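The run of RegionTooBusyException WARN/DEBUG pairs above, together with the flush that has just finished for c60040e46a02e10beaa963566dc1e39f, shows the region server rejecting writes while the region's memstore sits over its 512.0 K blocking limit until the flush completes. The exception is retryable and the stock HBase client normally retries it on the caller's behalf; purely as a hypothetical sketch of handling the same rejection explicitly (BusyRegionRetryExample, putWithBackoff and isRegionTooBusy are illustrative names, not HBase APIs, and the retry count and pauses are arbitrary):

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetryExample {

  // Walk the cause chain to see whether the failure was a busy-region rejection.
  static boolean isRegionTooBusy(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }

  // Illustrative helper: apply a Put, backing off and retrying a few times when the
  // region reports "Over memstore limit" while a flush is still in progress.
  static void putWithBackoff(Connection conn, TableName tableName, Put put)
      throws IOException, InterruptedException {
    long pauseMs = 100;
    for (int attempt = 0; ; attempt++) {
      try (Table table = conn.getTable(tableName)) {
        table.put(put);
        return;
      } catch (IOException e) {
        if (!isRegionTooBusy(e) || attempt >= 5) {
          throw e;                     // unrelated failure, or out of retries
        }
        Thread.sleep(pauseMs);         // give the in-flight flush time to finish
        pauseMs = Math.min(pauseMs * 2, 2000);
      }
    }
  }
}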
2024-11-25T17:08:46,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-25T17:08:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-25T17:08:46,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-25T17:08:46,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8110 sec 2024-11-25T17:08:46,464 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.8240 sec 2024-11-25T17:08:47,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:47,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:08:47,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:47,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:47,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:47,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:47,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:47,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:47,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112520a75e5f75bc486990ff14dcba824fa9_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554525025/Put/seqid=0 2024-11-25T17:08:47,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742027_1203 (size=14994) 2024-11-25T17:08:47,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554587160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554587163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554587163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554587264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554587266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554587270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554587470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554587471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554587478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,559 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:47,570 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112520a75e5f75bc486990ff14dcba824fa9_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112520a75e5f75bc486990ff14dcba824fa9_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:47,571 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c6628831b6c942e19c4b413613163372, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:47,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c6628831b6c942e19c4b413613163372 is 175, key is test_row_0/A:col10/1732554525025/Put/seqid=0 2024-11-25T17:08:47,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742028_1204 (size=39949) 2024-11-25T17:08:47,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-25T17:08:47,746 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-25T17:08:47,755 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:47,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-25T17:08:47,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-25T17:08:47,758 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:47,761 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:47,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:47,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554587778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554587781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:47,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554587782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:47,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-25T17:08:47,912 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:47,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-25T17:08:47,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:47,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:47,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:47,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
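For context on pid=61/62 above: the "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" request is stored on the master as a FlushTableProcedure with a FlushRegionProcedure subprocedure, and the region server rejects that subprocedure with "Unable to complete flush" because the region is already flushing; the dispatch is retried shortly afterwards, as the later entries show. A table flush like this is requested through the Admin API; a minimal sketch, assuming a reachable cluster configuration on the classpath (FlushTableExample is an illustrative class name):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush the table's regions; this surfaces in the log as a
      // FlushTableProcedure with FlushRegionProcedure subprocedures.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}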
2024-11-25T17:08:47,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:47,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:47,979 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c6628831b6c942e19c4b413613163372 2024-11-25T17:08:47,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/0f26e761e4ed48d19e4c2deb5f4c5998 is 50, key is test_row_0/B:col10/1732554525025/Put/seqid=0 2024-11-25T17:08:48,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742029_1205 (size=12301) 2024-11-25T17:08:48,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-25T17:08:48,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:48,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-25T17:08:48,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:48,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,224 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:48,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-25T17:08:48,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:48,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:48,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554588283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:48,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:48,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554588285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:48,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:48,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554588292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:48,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-25T17:08:48,377 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:48,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-25T17:08:48,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:48,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:48,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/0f26e761e4ed48d19e4c2deb5f4c5998 2024-11-25T17:08:48,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/34c3d4a51e0e4b9ab2ef6fc0dd103720 is 50, key is test_row_0/C:col10/1732554525025/Put/seqid=0 2024-11-25T17:08:48,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742030_1206 (size=12301) 2024-11-25T17:08:48,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/34c3d4a51e0e4b9ab2ef6fc0dd103720 2024-11-25T17:08:48,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/c6628831b6c942e19c4b413613163372 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c6628831b6c942e19c4b413613163372 2024-11-25T17:08:48,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c6628831b6c942e19c4b413613163372, entries=200, sequenceid=290, filesize=39.0 K 2024-11-25T17:08:48,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/0f26e761e4ed48d19e4c2deb5f4c5998 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/0f26e761e4ed48d19e4c2deb5f4c5998 2024-11-25T17:08:48,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/0f26e761e4ed48d19e4c2deb5f4c5998, entries=150, sequenceid=290, filesize=12.0 K 2024-11-25T17:08:48,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/34c3d4a51e0e4b9ab2ef6fc0dd103720 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/34c3d4a51e0e4b9ab2ef6fc0dd103720 2024-11-25T17:08:48,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/34c3d4a51e0e4b9ab2ef6fc0dd103720, entries=150, sequenceid=290, filesize=12.0 K 2024-11-25T17:08:48,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for c60040e46a02e10beaa963566dc1e39f in 1328ms, sequenceid=290, compaction requested=true 2024-11-25T17:08:48,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:48,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:48,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:48,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:48,473 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:48,473 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:48,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:48,474 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:08:48,474 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:48,476 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133926 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:48,476 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/A is initiating minor compaction (all files) 2024-11-25T17:08:48,476 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:48,476 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/A in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,476 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/B is initiating minor compaction (all files) 2024-11-25T17:08:48,476 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/241441a00a3c4d64be039c9d245e70ff, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/255ebac4125c4c03bf3779c3f60e8e5e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/5726fe204c5241bc9ce870702b79c01b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c6628831b6c942e19c4b413613163372] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=130.8 K 2024-11-25T17:08:48,476 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/B in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,476 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,476 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/241441a00a3c4d64be039c9d245e70ff, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/255ebac4125c4c03bf3779c3f60e8e5e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/5726fe204c5241bc9ce870702b79c01b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c6628831b6c942e19c4b413613163372] 2024-11-25T17:08:48,476 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8f268081013341b8abb81cadb9c8cd57, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/19571f75205e42d385d3235e3af1b62e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/767788f4442c41dcbbd59924688d95cc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/0f26e761e4ed48d19e4c2deb5f4c5998] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=48.3 K 2024-11-25T17:08:48,477 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 241441a00a3c4d64be039c9d245e70ff, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732554521375 2024-11-25T17:08:48,477 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f268081013341b8abb81cadb9c8cd57, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732554521375 2024-11-25T17:08:48,477 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 255ebac4125c4c03bf3779c3f60e8e5e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554521739 2024-11-25T17:08:48,477 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 19571f75205e42d385d3235e3af1b62e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554521739 2024-11-25T17:08:48,478 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 767788f4442c41dcbbd59924688d95cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732554522873 2024-11-25T17:08:48,478 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f26e761e4ed48d19e4c2deb5f4c5998, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732554525025 2024-11-25T17:08:48,478 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5726fe204c5241bc9ce870702b79c01b, keycount=150, 
bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732554522873 2024-11-25T17:08:48,479 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6628831b6c942e19c4b413613163372, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732554525025 2024-11-25T17:08:48,502 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:48,504 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125a37c66a53a7a46cf8a30e5540add897b_c60040e46a02e10beaa963566dc1e39f store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:48,505 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#B#compaction#171 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:48,506 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/2521b6671194488992ca8cc40c9d8579 is 50, key is test_row_0/B:col10/1732554525025/Put/seqid=0 2024-11-25T17:08:48,506 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125a37c66a53a7a46cf8a30e5540add897b_c60040e46a02e10beaa963566dc1e39f, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:48,506 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125a37c66a53a7a46cf8a30e5540add897b_c60040e46a02e10beaa963566dc1e39f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:48,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:48,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-25T17:08:48,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:48,541 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-25T17:08:48,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:48,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:48,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:48,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:48,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:48,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:48,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742032_1208 (size=4469) 2024-11-25T17:08:48,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742031_1207 (size=12949) 2024-11-25T17:08:48,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125efe66a127d184ec7bc053c1d41c2c39f_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554527162/Put/seqid=0 2024-11-25T17:08:48,571 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/2521b6671194488992ca8cc40c9d8579 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/2521b6671194488992ca8cc40c9d8579 2024-11-25T17:08:48,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742033_1209 (size=12454) 2024-11-25T17:08:48,580 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/B of c60040e46a02e10beaa963566dc1e39f into 2521b6671194488992ca8cc40c9d8579(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:48,580 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:48,580 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/B, priority=12, startTime=1732554528473; duration=0sec 2024-11-25T17:08:48,580 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:48,580 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:B 2024-11-25T17:08:48,580 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:08:48,581 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:08:48,581 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/C is initiating minor compaction (all files) 2024-11-25T17:08:48,582 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/C in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:48,582 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2f71f389cb9d40abb32c9202b398868b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/0d3e6f7132754b04800bb75d65318d0c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f948c474b445440ebb6094b24b9f7512, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/34c3d4a51e0e4b9ab2ef6fc0dd103720] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=48.3 K 2024-11-25T17:08:48,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:48,589 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f71f389cb9d40abb32c9202b398868b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732554521375 2024-11-25T17:08:48,589 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d3e6f7132754b04800bb75d65318d0c, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732554521739 2024-11-25T17:08:48,590 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f948c474b445440ebb6094b24b9f7512, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732554522873 2024-11-25T17:08:48,590 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 34c3d4a51e0e4b9ab2ef6fc0dd103720, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732554525025 2024-11-25T17:08:48,592 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125efe66a127d184ec7bc053c1d41c2c39f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125efe66a127d184ec7bc053c1d41c2c39f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/8ea2d3399971466bb8e1dde60362d510, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:48,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/8ea2d3399971466bb8e1dde60362d510 is 175, key is test_row_0/A:col10/1732554527162/Put/seqid=0 2024-11-25T17:08:48,603 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#C#compaction#173 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:48,603 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/db8277f502cc4128935fd7399c49b43f is 50, key is test_row_0/C:col10/1732554525025/Put/seqid=0 2024-11-25T17:08:48,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742034_1210 (size=31255) 2024-11-25T17:08:48,609 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=305, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/8ea2d3399971466bb8e1dde60362d510 2024-11-25T17:08:48,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/bb9d74f4753f4c15a320ac08a00c18ff is 50, key is test_row_0/B:col10/1732554527162/Put/seqid=0 2024-11-25T17:08:48,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742035_1211 (size=12949) 2024-11-25T17:08:48,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742036_1212 (size=12301) 2024-11-25T17:08:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-25T17:08:48,954 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#A#compaction#170 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:48,955 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/e819600c4ea44b0d9b6d7f2931cbc51d is 175, key is test_row_0/A:col10/1732554525025/Put/seqid=0 2024-11-25T17:08:48,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742037_1213 (size=31903) 2024-11-25T17:08:49,066 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/bb9d74f4753f4c15a320ac08a00c18ff 2024-11-25T17:08:49,071 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/db8277f502cc4128935fd7399c49b43f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/db8277f502cc4128935fd7399c49b43f 2024-11-25T17:08:49,081 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/C of c60040e46a02e10beaa963566dc1e39f into db8277f502cc4128935fd7399c49b43f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:49,081 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:49,081 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/C, priority=12, startTime=1732554528473; duration=0sec 2024-11-25T17:08:49,081 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:49,081 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:C 2024-11-25T17:08:49,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/1f0e3f841cfb45348ef675fa30203595 is 50, key is test_row_0/C:col10/1732554527162/Put/seqid=0 2024-11-25T17:08:49,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742038_1214 (size=12301) 2024-11-25T17:08:49,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:49,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. as already flushing 2024-11-25T17:08:49,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554589310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554589310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554589311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,387 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/e819600c4ea44b0d9b6d7f2931cbc51d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/e819600c4ea44b0d9b6d7f2931cbc51d 2024-11-25T17:08:49,393 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/A of c60040e46a02e10beaa963566dc1e39f into e819600c4ea44b0d9b6d7f2931cbc51d(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:49,393 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:49,393 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/A, priority=12, startTime=1732554528473; duration=0sec 2024-11-25T17:08:49,393 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:49,393 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:A 2024-11-25T17:08:49,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554589414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554589416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554589416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,498 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/1f0e3f841cfb45348ef675fa30203595 2024-11-25T17:08:49,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/8ea2d3399971466bb8e1dde60362d510 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/8ea2d3399971466bb8e1dde60362d510 2024-11-25T17:08:49,509 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/8ea2d3399971466bb8e1dde60362d510, entries=150, sequenceid=305, filesize=30.5 K 2024-11-25T17:08:49,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/bb9d74f4753f4c15a320ac08a00c18ff as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bb9d74f4753f4c15a320ac08a00c18ff 2024-11-25T17:08:49,515 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bb9d74f4753f4c15a320ac08a00c18ff, entries=150, sequenceid=305, filesize=12.0 K 2024-11-25T17:08:49,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/1f0e3f841cfb45348ef675fa30203595 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/1f0e3f841cfb45348ef675fa30203595 2024-11-25T17:08:49,531 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/1f0e3f841cfb45348ef675fa30203595, entries=150, sequenceid=305, filesize=12.0 K 2024-11-25T17:08:49,533 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for c60040e46a02e10beaa963566dc1e39f in 992ms, sequenceid=305, compaction requested=false 2024-11-25T17:08:49,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:49,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:49,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-25T17:08:49,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-25T17:08:49,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-25T17:08:49,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7730 sec 2024-11-25T17:08:49,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.7820 sec 2024-11-25T17:08:49,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:49,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:08:49,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:49,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:49,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:49,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:49,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:49,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:49,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554589641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ef89400ffc4e4613915a05fcdd008768_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554529305/Put/seqid=0 2024-11-25T17:08:49,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554589651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554589653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742039_1215 (size=14994) 2024-11-25T17:08:49,752 DEBUG [Thread-655 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00cb464a to 127.0.0.1:56265 2024-11-25T17:08:49,752 DEBUG [Thread-653 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x465dc764 to 127.0.0.1:56265 2024-11-25T17:08:49,752 DEBUG [Thread-655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:49,752 DEBUG [Thread-653 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:49,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554589753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,756 DEBUG [Thread-659 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:56265 2024-11-25T17:08:49,756 DEBUG [Thread-659 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:49,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554589756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554589758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,758 DEBUG [Thread-657 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78cafade to 127.0.0.1:56265 2024-11-25T17:08:49,758 DEBUG [Thread-657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:49,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-25T17:08:49,870 INFO [Thread-652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-25T17:08:49,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554589956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554589957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:49,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554589959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:50,102 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:50,107 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ef89400ffc4e4613915a05fcdd008768_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ef89400ffc4e4613915a05fcdd008768_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:50,110 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/7e1bc9ba5c484fb286905d6eab774bae, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:50,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/7e1bc9ba5c484fb286905d6eab774bae is 175, key is test_row_0/A:col10/1732554529305/Put/seqid=0 2024-11-25T17:08:50,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742040_1216 (size=39949) 2024-11-25T17:08:50,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:50,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554590260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554590260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:50,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554590260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:50,518 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/7e1bc9ba5c484fb286905d6eab774bae 2024-11-25T17:08:50,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/3791b75463204508b4daf46c628cb2e5 is 50, key is test_row_0/B:col10/1732554529305/Put/seqid=0 2024-11-25T17:08:50,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742041_1217 (size=12301) 2024-11-25T17:08:50,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45648 deadline: 1732554590762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:50,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:50,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45662 deadline: 1732554590764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:50,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:50,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45696 deadline: 1732554590765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:50,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/3791b75463204508b4daf46c628cb2e5 2024-11-25T17:08:50,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/b9e0f97ae1984af6b58ef9b7be3436e8 is 50, key is test_row_0/C:col10/1732554529305/Put/seqid=0 2024-11-25T17:08:50,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742042_1218 (size=12301) 2024-11-25T17:08:51,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/b9e0f97ae1984af6b58ef9b7be3436e8 2024-11-25T17:08:51,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/7e1bc9ba5c484fb286905d6eab774bae as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/7e1bc9ba5c484fb286905d6eab774bae 2024-11-25T17:08:51,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/7e1bc9ba5c484fb286905d6eab774bae, entries=200, sequenceid=330, filesize=39.0 K 2024-11-25T17:08:51,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/3791b75463204508b4daf46c628cb2e5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/3791b75463204508b4daf46c628cb2e5 2024-11-25T17:08:51,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/3791b75463204508b4daf46c628cb2e5, entries=150, sequenceid=330, filesize=12.0 K 2024-11-25T17:08:51,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/b9e0f97ae1984af6b58ef9b7be3436e8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/b9e0f97ae1984af6b58ef9b7be3436e8 2024-11-25T17:08:51,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/b9e0f97ae1984af6b58ef9b7be3436e8, entries=150, sequenceid=330, filesize=12.0 K 2024-11-25T17:08:51,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for c60040e46a02e10beaa963566dc1e39f in 1749ms, sequenceid=330, compaction requested=true 2024-11-25T17:08:51,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:51,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:51,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:51,372 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:51,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:51,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:51,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c60040e46a02e10beaa963566dc1e39f:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:51,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:51,372 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:51,373 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:51,373 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:51,374 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/B is initiating minor compaction (all files) 2024-11-25T17:08:51,374 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/A is initiating minor compaction (all files) 2024-11-25T17:08:51,374 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/B in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:51,374 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/A in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:51,374 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/e819600c4ea44b0d9b6d7f2931cbc51d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/8ea2d3399971466bb8e1dde60362d510, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/7e1bc9ba5c484fb286905d6eab774bae] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=100.7 K 2024-11-25T17:08:51,374 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/2521b6671194488992ca8cc40c9d8579, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bb9d74f4753f4c15a320ac08a00c18ff, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/3791b75463204508b4daf46c628cb2e5] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=36.7 K 2024-11-25T17:08:51,374 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
2024-11-25T17:08:51,374 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/e819600c4ea44b0d9b6d7f2931cbc51d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/8ea2d3399971466bb8e1dde60362d510, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/7e1bc9ba5c484fb286905d6eab774bae] 2024-11-25T17:08:51,375 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e819600c4ea44b0d9b6d7f2931cbc51d, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732554525025 2024-11-25T17:08:51,375 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2521b6671194488992ca8cc40c9d8579, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732554525025 2024-11-25T17:08:51,375 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ea2d3399971466bb8e1dde60362d510, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732554527156 2024-11-25T17:08:51,375 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting bb9d74f4753f4c15a320ac08a00c18ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732554527156 2024-11-25T17:08:51,376 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3791b75463204508b4daf46c628cb2e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732554529305 2024-11-25T17:08:51,376 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e1bc9ba5c484fb286905d6eab774bae, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732554529305 2024-11-25T17:08:51,386 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#B#compaction#179 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:51,386 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/bf3d5d920273424491a7ba8467ff0737 is 50, key is test_row_0/B:col10/1732554529305/Put/seqid=0 2024-11-25T17:08:51,389 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:51,391 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411255c16d8e89d2445dfb269812722251a29_c60040e46a02e10beaa963566dc1e39f store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:51,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742043_1219 (size=13051) 2024-11-25T17:08:51,426 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411255c16d8e89d2445dfb269812722251a29_c60040e46a02e10beaa963566dc1e39f, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:51,426 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411255c16d8e89d2445dfb269812722251a29_c60040e46a02e10beaa963566dc1e39f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:51,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742044_1220 (size=4469) 2024-11-25T17:08:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:51,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-25T17:08:51,774 DEBUG [Thread-646 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x491ea2ee to 127.0.0.1:56265 2024-11-25T17:08:51,774 DEBUG [Thread-646 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:51,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:51,777 DEBUG [Thread-650 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:56265 2024-11-25T17:08:51,777 DEBUG [Thread-650 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:51,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:51,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:51,777 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:51,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:51,777 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:51,781 DEBUG [Thread-648 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:56265 2024-11-25T17:08:51,781 DEBUG [Thread-648 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:51,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125a94284a7bda24245860206048d20ca78_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554531768/Put/seqid=0 2024-11-25T17:08:51,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742045_1221 (size=12454) 2024-11-25T17:08:51,799 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:51,799 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/bf3d5d920273424491a7ba8467ff0737 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bf3d5d920273424491a7ba8467ff0737 2024-11-25T17:08:51,805 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125a94284a7bda24245860206048d20ca78_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a94284a7bda24245860206048d20ca78_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:51,806 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/65b88316c66348ec96bb4137d68c21d3, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:51,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/65b88316c66348ec96bb4137d68c21d3 is 175, key is test_row_0/A:col10/1732554531768/Put/seqid=0 2024-11-25T17:08:51,807 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/B of c60040e46a02e10beaa963566dc1e39f into bf3d5d920273424491a7ba8467ff0737(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:51,807 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:51,807 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/B, priority=13, startTime=1732554531372; duration=0sec 2024-11-25T17:08:51,808 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:51,808 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:B 2024-11-25T17:08:51,808 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:51,809 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:51,809 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c60040e46a02e10beaa963566dc1e39f/C is initiating minor compaction (all files) 2024-11-25T17:08:51,809 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c60040e46a02e10beaa963566dc1e39f/C in TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:51,809 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/db8277f502cc4128935fd7399c49b43f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/1f0e3f841cfb45348ef675fa30203595, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/b9e0f97ae1984af6b58ef9b7be3436e8] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp, totalSize=36.7 K 2024-11-25T17:08:51,810 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting db8277f502cc4128935fd7399c49b43f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732554525025 2024-11-25T17:08:51,820 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f0e3f841cfb45348ef675fa30203595, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732554527156 2024-11-25T17:08:51,820 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b9e0f97ae1984af6b58ef9b7be3436e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732554529305 2024-11-25T17:08:51,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 
is added to blk_1073742046_1222 (size=31255) 2024-11-25T17:08:51,832 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#C#compaction#182 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:51,833 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/f6bb62e53b184f4289aa94bfeff81014 is 50, key is test_row_0/C:col10/1732554529305/Put/seqid=0 2024-11-25T17:08:51,846 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c60040e46a02e10beaa963566dc1e39f#A#compaction#180 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:51,847 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/65f08247c9a449ceb6a910104097cb60 is 175, key is test_row_0/A:col10/1732554529305/Put/seqid=0 2024-11-25T17:08:51,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742047_1223 (size=13051) 2024-11-25T17:08:51,867 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/f6bb62e53b184f4289aa94bfeff81014 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f6bb62e53b184f4289aa94bfeff81014 2024-11-25T17:08:51,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742048_1224 (size=32005) 2024-11-25T17:08:51,874 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/65f08247c9a449ceb6a910104097cb60 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/65f08247c9a449ceb6a910104097cb60 2024-11-25T17:08:51,879 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/C of c60040e46a02e10beaa963566dc1e39f into f6bb62e53b184f4289aa94bfeff81014(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:08:51,879 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:51,879 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/C, priority=13, startTime=1732554531372; duration=0sec 2024-11-25T17:08:51,879 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:51,879 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:C 2024-11-25T17:08:51,884 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c60040e46a02e10beaa963566dc1e39f/A of c60040e46a02e10beaa963566dc1e39f into 65f08247c9a449ceb6a910104097cb60(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:51,884 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:51,884 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f., storeName=c60040e46a02e10beaa963566dc1e39f/A, priority=13, startTime=1732554531372; duration=0sec 2024-11-25T17:08:51,884 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:51,884 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c60040e46a02e10beaa963566dc1e39f:A 2024-11-25T17:08:52,221 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=346, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/65b88316c66348ec96bb4137d68c21d3 2024-11-25T17:08:52,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/ec5c275ab00a4761930a89cfb96bc31f is 50, key is test_row_0/B:col10/1732554531768/Put/seqid=0 2024-11-25T17:08:52,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742049_1225 (size=12301) 2024-11-25T17:08:52,624 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
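At this point all three stores (A, B and C) of the region have been compacted: the three eligible HFiles per store were rewritten into a single file and the "under compaction" marks were cleared. These compactions were selected server-side by the ExploringCompactionPolicy; a client can also ask for a compaction explicitly. A minimal sketch under the same assumptions as above (cluster configuration on the classpath, table name taken from this test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.compact(table);        // queue a minor compaction request for every region of the table
          admin.majorCompact(table);   // or request a major compaction that rewrites all store files
        }
      }
    }

Both calls only enqueue the request; the CompactSplit worker threads visible in the log (shortCompactions/longCompactions) perform the actual rewrite asynchronously.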
2024-11-25T17:08:52,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/ec5c275ab00a4761930a89cfb96bc31f 2024-11-25T17:08:52,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/91cc5f0b28f240768dde7293853540d9 is 50, key is test_row_0/C:col10/1732554531768/Put/seqid=0 2024-11-25T17:08:52,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742050_1226 (size=12301) 2024-11-25T17:08:53,053 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/91cc5f0b28f240768dde7293853540d9 2024-11-25T17:08:53,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/65b88316c66348ec96bb4137d68c21d3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/65b88316c66348ec96bb4137d68c21d3 2024-11-25T17:08:53,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/65b88316c66348ec96bb4137d68c21d3, entries=150, sequenceid=346, filesize=30.5 K 2024-11-25T17:08:53,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/ec5c275ab00a4761930a89cfb96bc31f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/ec5c275ab00a4761930a89cfb96bc31f 2024-11-25T17:08:53,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/ec5c275ab00a4761930a89cfb96bc31f, entries=150, sequenceid=346, filesize=12.0 K 2024-11-25T17:08:53,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/91cc5f0b28f240768dde7293853540d9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/91cc5f0b28f240768dde7293853540d9 2024-11-25T17:08:53,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/91cc5f0b28f240768dde7293853540d9, entries=150, sequenceid=346, filesize=12.0 K 2024-11-25T17:08:53,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=6.71 KB/6870 for c60040e46a02e10beaa963566dc1e39f in 1297ms, sequenceid=346, compaction requested=false 2024-11-25T17:08:53,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:53,790 DEBUG [Thread-642 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2089ec29 to 127.0.0.1:56265 2024-11-25T17:08:53,790 DEBUG [Thread-642 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:53,801 DEBUG [Thread-644 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:56265 2024-11-25T17:08:53,801 DEBUG [Thread-644 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5666 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5573 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2414 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7241 rows 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2429 2024-11-25T17:08:53,802 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7286 rows 2024-11-25T17:08:53,802 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-25T17:08:53,802 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x546bf8b8 to 127.0.0.1:56265 2024-11-25T17:08:53,802 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:08:53,804 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-25T17:08:53,805 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-25T17:08:53,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:53,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 
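The test tool has printed its write/read/scan summary, and the client now asks the master to disable TestAcidGuarantees; the master stores a DisableTableProcedure (pid=63) and the client polls MasterRpcServices until the procedure is done. From application code the same teardown is usually done with the Admin API; a minimal sketch under the same assumptions as the earlier ones (the delete step is shown only as a hypothetical follow-up and does not occur in this log excerpt):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);   // blocks until the server-side disable procedure completes
          }
          // admin.deleteTable(table);   // hypothetical follow-up; not performed in this log
        }
      }
    }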
2024-11-25T17:08:53,808 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554533808"}]},"ts":"1732554533808"} 2024-11-25T17:08:53,809 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-25T17:08:53,812 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-25T17:08:53,813 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-25T17:08:53,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, UNASSIGN}] 2024-11-25T17:08:53,815 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, UNASSIGN 2024-11-25T17:08:53,816 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:53,817 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:08:53,817 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:08:53,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-25T17:08:53,968 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:53,969 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing c60040e46a02e10beaa963566dc1e39f, disabling compactions & flushes 2024-11-25T17:08:53,969 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
after waiting 0 ms 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 2024-11-25T17:08:53,969 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing c60040e46a02e10beaa963566dc1e39f 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=A 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:53,969 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=B 2024-11-25T17:08:53,970 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:53,970 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c60040e46a02e10beaa963566dc1e39f, store=C 2024-11-25T17:08:53,970 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:53,975 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ad4eef92d8be4650bb57050356c6815b_c60040e46a02e10beaa963566dc1e39f is 50, key is test_row_0/A:col10/1732554533789/Put/seqid=0 2024-11-25T17:08:53,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742051_1227 (size=12454) 2024-11-25T17:08:53,983 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:53,987 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ad4eef92d8be4650bb57050356c6815b_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ad4eef92d8be4650bb57050356c6815b_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:53,988 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d22fc96b10ee45888e5b8b6c27bdb5ac, store: [table=TestAcidGuarantees family=A region=c60040e46a02e10beaa963566dc1e39f] 2024-11-25T17:08:53,989 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d22fc96b10ee45888e5b8b6c27bdb5ac is 175, key is test_row_0/A:col10/1732554533789/Put/seqid=0 2024-11-25T17:08:53,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742052_1228 (size=31255) 2024-11-25T17:08:54,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-25T17:08:54,394 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d22fc96b10ee45888e5b8b6c27bdb5ac 2024-11-25T17:08:54,403 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/40d5d2322a034ba387fd0fd2ecd60f73 is 50, key is test_row_0/B:col10/1732554533789/Put/seqid=0 2024-11-25T17:08:54,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-25T17:08:54,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742053_1229 (size=12301) 2024-11-25T17:08:54,412 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/40d5d2322a034ba387fd0fd2ecd60f73 2024-11-25T17:08:54,427 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/3886c311c52c4331bfb551ceaecf8517 is 50, key is test_row_0/C:col10/1732554533789/Put/seqid=0 2024-11-25T17:08:54,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742054_1230 (size=12301) 2024-11-25T17:08:54,438 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=355 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/3886c311c52c4331bfb551ceaecf8517 2024-11-25T17:08:54,443 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/A/d22fc96b10ee45888e5b8b6c27bdb5ac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d22fc96b10ee45888e5b8b6c27bdb5ac 2024-11-25T17:08:54,453 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d22fc96b10ee45888e5b8b6c27bdb5ac, entries=150, sequenceid=355, filesize=30.5 K 2024-11-25T17:08:54,471 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/B/40d5d2322a034ba387fd0fd2ecd60f73 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/40d5d2322a034ba387fd0fd2ecd60f73 2024-11-25T17:08:54,476 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/40d5d2322a034ba387fd0fd2ecd60f73, entries=150, sequenceid=355, filesize=12.0 K 2024-11-25T17:08:54,477 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/.tmp/C/3886c311c52c4331bfb551ceaecf8517 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/3886c311c52c4331bfb551ceaecf8517 2024-11-25T17:08:54,485 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/3886c311c52c4331bfb551ceaecf8517, entries=150, sequenceid=355, filesize=12.0 K 2024-11-25T17:08:54,489 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for c60040e46a02e10beaa963566dc1e39f in 520ms, sequenceid=355, compaction requested=true 2024-11-25T17:08:54,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a2bb12651244c3db5dd04d0cc936e87, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2fbc3e6ce00f4dc1a7e496bda8395767, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/216b64a17ac949bbb5431cfb92700068, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2da7bfca8e3343d99872cf7a0f474ce9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ab3dabf5b5f24a7c937bb68f1dd0fc70, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/702966b55cd94fe484e5dede495b0e8d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/11420c7d3db64fae85ef164422ed776b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c9cb5dc4717840c08e0f52b4242ae211, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/859d85cafa784804abdafd1662c0811a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/fdd5ab5e16cc47baa2e40c6186a62465, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ed4e381df41b4c12b59e8a8e9f935fb1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d73d376fafb94af895b7b5a4681c562c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ff09492d99ed4747b19237dfb5bbc7b9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a12bbe578d84cb1b6ddd33c03edf71e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/241441a00a3c4d64be039c9d245e70ff, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/db1677887fd24d38b11bb76d2bb8c814, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/255ebac4125c4c03bf3779c3f60e8e5e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/5726fe204c5241bc9ce870702b79c01b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c6628831b6c942e19c4b413613163372, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/e819600c4ea44b0d9b6d7f2931cbc51d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/8ea2d3399971466bb8e1dde60362d510, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/7e1bc9ba5c484fb286905d6eab774bae] to archive 2024-11-25T17:08:54,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:08:54,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a2bb12651244c3db5dd04d0cc936e87 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a2bb12651244c3db5dd04d0cc936e87 2024-11-25T17:08:54,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2fbc3e6ce00f4dc1a7e496bda8395767 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2fbc3e6ce00f4dc1a7e496bda8395767 2024-11-25T17:08:54,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/216b64a17ac949bbb5431cfb92700068 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/216b64a17ac949bbb5431cfb92700068 2024-11-25T17:08:54,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2da7bfca8e3343d99872cf7a0f474ce9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/2da7bfca8e3343d99872cf7a0f474ce9 2024-11-25T17:08:54,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ab3dabf5b5f24a7c937bb68f1dd0fc70 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ab3dabf5b5f24a7c937bb68f1dd0fc70 2024-11-25T17:08:54,506 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/702966b55cd94fe484e5dede495b0e8d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/702966b55cd94fe484e5dede495b0e8d 2024-11-25T17:08:54,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/11420c7d3db64fae85ef164422ed776b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/11420c7d3db64fae85ef164422ed776b 2024-11-25T17:08:54,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c9cb5dc4717840c08e0f52b4242ae211 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c9cb5dc4717840c08e0f52b4242ae211 2024-11-25T17:08:54,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/859d85cafa784804abdafd1662c0811a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/859d85cafa784804abdafd1662c0811a 2024-11-25T17:08:54,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/fdd5ab5e16cc47baa2e40c6186a62465 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/fdd5ab5e16cc47baa2e40c6186a62465 2024-11-25T17:08:54,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ed4e381df41b4c12b59e8a8e9f935fb1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ed4e381df41b4c12b59e8a8e9f935fb1 2024-11-25T17:08:54,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d73d376fafb94af895b7b5a4681c562c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d73d376fafb94af895b7b5a4681c562c 2024-11-25T17:08:54,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ff09492d99ed4747b19237dfb5bbc7b9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/ff09492d99ed4747b19237dfb5bbc7b9 2024-11-25T17:08:54,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a12bbe578d84cb1b6ddd33c03edf71e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/4a12bbe578d84cb1b6ddd33c03edf71e 2024-11-25T17:08:54,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/241441a00a3c4d64be039c9d245e70ff to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/241441a00a3c4d64be039c9d245e70ff 2024-11-25T17:08:54,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/db1677887fd24d38b11bb76d2bb8c814 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/db1677887fd24d38b11bb76d2bb8c814 2024-11-25T17:08:54,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/255ebac4125c4c03bf3779c3f60e8e5e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/255ebac4125c4c03bf3779c3f60e8e5e 2024-11-25T17:08:54,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/5726fe204c5241bc9ce870702b79c01b to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/5726fe204c5241bc9ce870702b79c01b 2024-11-25T17:08:54,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c6628831b6c942e19c4b413613163372 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/c6628831b6c942e19c4b413613163372 2024-11-25T17:08:54,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/e819600c4ea44b0d9b6d7f2931cbc51d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/e819600c4ea44b0d9b6d7f2931cbc51d 2024-11-25T17:08:54,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/8ea2d3399971466bb8e1dde60362d510 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/8ea2d3399971466bb8e1dde60362d510 2024-11-25T17:08:54,538 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/7e1bc9ba5c484fb286905d6eab774bae to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/7e1bc9ba5c484fb286905d6eab774bae 2024-11-25T17:08:54,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/e7d9962c1039411cb0ddc79f25f71e78, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/cd03b6a0d383465b9bd75e59b9e839a4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/7bee49e7cd294be2907d6ccc193be33d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/9eed1923d6e2413796c274671c9b27ec, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d8ac8ae1798b46fea63c34cf888d1f71, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5a0143e002be4c87921260e044c85e96, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/168771e9ad1e4b2cac578f63ea240bfb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5e400d05e12d49dab5d8433be143b865, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/811f2ce2a3584562938bb3467a87180c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8ede28a7d8804096a9202b5b97a17194, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/f5627db4c4b248078d2e74c7f7817647, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/849762f7f264476bb32e61b0ccb2c859, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/af3ae5f1024041e8875d7e489653db61, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d82443a94bab497cae7c656c3096daf3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8f268081013341b8abb81cadb9c8cd57, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/91dbb05a17fa44a582a23dea912b9af0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/19571f75205e42d385d3235e3af1b62e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/767788f4442c41dcbbd59924688d95cc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/2521b6671194488992ca8cc40c9d8579, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/0f26e761e4ed48d19e4c2deb5f4c5998, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bb9d74f4753f4c15a320ac08a00c18ff, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/3791b75463204508b4daf46c628cb2e5] to archive 2024-11-25T17:08:54,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
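As the region closes, the StoreCloser hands the replaced store files of each family to HFileArchiver, which moves them from data/default/TestAcidGuarantees/<region>/<family> into the parallel archive/... tree rather than deleting them outright. To inspect what ended up in the archive, one can walk that directory with the plain Hadoop FileSystem API; a minimal sketch, with the namenode URI and test-data path copied from this log and therefore specific to this particular run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListArchivedHFilesSketch {
      public static void main(String[] args) throws Exception {
        Path archive = new Path("hdfs://localhost:41117/user/jenkins/test-data/"
            + "ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees");
        try (FileSystem fs = archive.getFileSystem(new Configuration())) {
          // Recurse through the per-region and per-family subdirectories.
          RemoteIterator<LocatedFileStatus> it = fs.listFiles(archive, true);
          while (it.hasNext()) {
            LocatedFileStatus f = it.next();
            System.out.println(f.getPath() + " " + f.getLen() + " bytes");
          }
        }
      }
    }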
2024-11-25T17:08:54,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/e7d9962c1039411cb0ddc79f25f71e78 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/e7d9962c1039411cb0ddc79f25f71e78 2024-11-25T17:08:54,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/cd03b6a0d383465b9bd75e59b9e839a4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/cd03b6a0d383465b9bd75e59b9e839a4 2024-11-25T17:08:54,554 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/7bee49e7cd294be2907d6ccc193be33d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/7bee49e7cd294be2907d6ccc193be33d 2024-11-25T17:08:54,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/9eed1923d6e2413796c274671c9b27ec to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/9eed1923d6e2413796c274671c9b27ec 2024-11-25T17:08:54,556 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d8ac8ae1798b46fea63c34cf888d1f71 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d8ac8ae1798b46fea63c34cf888d1f71 2024-11-25T17:08:54,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5a0143e002be4c87921260e044c85e96 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5a0143e002be4c87921260e044c85e96 2024-11-25T17:08:54,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/168771e9ad1e4b2cac578f63ea240bfb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/168771e9ad1e4b2cac578f63ea240bfb 2024-11-25T17:08:54,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5e400d05e12d49dab5d8433be143b865 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/5e400d05e12d49dab5d8433be143b865 2024-11-25T17:08:54,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/811f2ce2a3584562938bb3467a87180c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/811f2ce2a3584562938bb3467a87180c 2024-11-25T17:08:54,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8ede28a7d8804096a9202b5b97a17194 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8ede28a7d8804096a9202b5b97a17194 2024-11-25T17:08:54,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/f5627db4c4b248078d2e74c7f7817647 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/f5627db4c4b248078d2e74c7f7817647 2024-11-25T17:08:54,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/849762f7f264476bb32e61b0ccb2c859 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/849762f7f264476bb32e61b0ccb2c859 2024-11-25T17:08:54,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/af3ae5f1024041e8875d7e489653db61 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/af3ae5f1024041e8875d7e489653db61 2024-11-25T17:08:54,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d82443a94bab497cae7c656c3096daf3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/d82443a94bab497cae7c656c3096daf3 2024-11-25T17:08:54,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8f268081013341b8abb81cadb9c8cd57 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/8f268081013341b8abb81cadb9c8cd57 2024-11-25T17:08:54,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/91dbb05a17fa44a582a23dea912b9af0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/91dbb05a17fa44a582a23dea912b9af0 2024-11-25T17:08:54,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/19571f75205e42d385d3235e3af1b62e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/19571f75205e42d385d3235e3af1b62e 2024-11-25T17:08:54,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/767788f4442c41dcbbd59924688d95cc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/767788f4442c41dcbbd59924688d95cc 2024-11-25T17:08:54,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/2521b6671194488992ca8cc40c9d8579 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/2521b6671194488992ca8cc40c9d8579 2024-11-25T17:08:54,581 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/0f26e761e4ed48d19e4c2deb5f4c5998 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/0f26e761e4ed48d19e4c2deb5f4c5998 2024-11-25T17:08:54,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bb9d74f4753f4c15a320ac08a00c18ff to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bb9d74f4753f4c15a320ac08a00c18ff 2024-11-25T17:08:54,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/3791b75463204508b4daf46c628cb2e5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/3791b75463204508b4daf46c628cb2e5 2024-11-25T17:08:54,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/587f8d0b39eb47bbb74680fdc07beb02, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c4e8b18614ad4d0ea8244b2d29cebd5c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/5374b1c66bd04d3a8cdf8cbb2e34eb4e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/a747d79dd8074779b7f6b6941c2c0fba, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/7244f830ee1c44beb950964072c9bc83, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/17b4c2dd7ee146e395231fb1b5765d16, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/ed4d0a2fe0f74e4280ef61d1b79a1686, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/65533ab0b6b74c7593f76f48d3ee8150, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/423dd693721a4b8fbaf49dbe7da2d85f, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c0b1edb9c4844d51b72089083804014a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2e2ca5abd29c48309cce873266b185e2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2d21014f9df24495a1babee5f0743875, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/865391811da94e41ac3f7e66f5380009, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/cd7571cf7c31424ba1db4bd2b96dfd09, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2f71f389cb9d40abb32c9202b398868b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/277af2f02de940f298589c223b288848, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/0d3e6f7132754b04800bb75d65318d0c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f948c474b445440ebb6094b24b9f7512, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/db8277f502cc4128935fd7399c49b43f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/34c3d4a51e0e4b9ab2ef6fc0dd103720, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/1f0e3f841cfb45348ef675fa30203595, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/b9e0f97ae1984af6b58ef9b7be3436e8] to archive 2024-11-25T17:08:54,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
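Editor's note: the entries above show the store closer handing the compacted files of column family C to HFileArchiver, which relocates each one from the region's data directory to the mirrored location under /archive instead of deleting it. The helper below is a minimal illustrative sketch of that path translation only (it is not HBase's HFileArchiver code); the class and method names are hypothetical, and it assumes the .../data/<namespace>/<table>/<region>/<family>/<file> layout seen in this log.

// Hypothetical helper: derive the archive location an HFileArchiver-style
// relocation would use for a store file, by swapping the "data/" segment
// that follows the HBase root directory for "archive/data/".
public final class ArchivePathSketch {

    public static String toArchivePath(String rootDir, String storeFilePath) {
        String prefix = rootDir.endsWith("/") ? rootDir : rootDir + "/";
        if (!storeFilePath.startsWith(prefix + "data/")) {
            throw new IllegalArgumentException("not under " + prefix + "data/");
        }
        String relative = storeFilePath.substring((prefix + "data/").length());
        return prefix + "archive/data/" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4";
        String src = root + "/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/587f8d0b39eb47bbb74680fdc07beb02";
        // Prints the same destination the log reports for this file.
        System.out.println(toArchivePath(root, src));
    }
}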
2024-11-25T17:08:54,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/587f8d0b39eb47bbb74680fdc07beb02 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/587f8d0b39eb47bbb74680fdc07beb02 2024-11-25T17:08:54,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c4e8b18614ad4d0ea8244b2d29cebd5c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c4e8b18614ad4d0ea8244b2d29cebd5c 2024-11-25T17:08:54,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/5374b1c66bd04d3a8cdf8cbb2e34eb4e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/5374b1c66bd04d3a8cdf8cbb2e34eb4e 2024-11-25T17:08:54,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/a747d79dd8074779b7f6b6941c2c0fba to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/a747d79dd8074779b7f6b6941c2c0fba 2024-11-25T17:08:54,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/7244f830ee1c44beb950964072c9bc83 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/7244f830ee1c44beb950964072c9bc83 2024-11-25T17:08:54,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/17b4c2dd7ee146e395231fb1b5765d16 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/17b4c2dd7ee146e395231fb1b5765d16 2024-11-25T17:08:54,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/ed4d0a2fe0f74e4280ef61d1b79a1686 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/ed4d0a2fe0f74e4280ef61d1b79a1686 2024-11-25T17:08:54,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/65533ab0b6b74c7593f76f48d3ee8150 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/65533ab0b6b74c7593f76f48d3ee8150 2024-11-25T17:08:54,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/423dd693721a4b8fbaf49dbe7da2d85f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/423dd693721a4b8fbaf49dbe7da2d85f 2024-11-25T17:08:54,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c0b1edb9c4844d51b72089083804014a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/c0b1edb9c4844d51b72089083804014a 2024-11-25T17:08:54,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2e2ca5abd29c48309cce873266b185e2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2e2ca5abd29c48309cce873266b185e2 2024-11-25T17:08:54,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2d21014f9df24495a1babee5f0743875 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2d21014f9df24495a1babee5f0743875 2024-11-25T17:08:54,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/865391811da94e41ac3f7e66f5380009 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/865391811da94e41ac3f7e66f5380009 2024-11-25T17:08:54,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/cd7571cf7c31424ba1db4bd2b96dfd09 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/cd7571cf7c31424ba1db4bd2b96dfd09 2024-11-25T17:08:54,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2f71f389cb9d40abb32c9202b398868b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/2f71f389cb9d40abb32c9202b398868b 2024-11-25T17:08:54,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/277af2f02de940f298589c223b288848 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/277af2f02de940f298589c223b288848 2024-11-25T17:08:54,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/0d3e6f7132754b04800bb75d65318d0c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/0d3e6f7132754b04800bb75d65318d0c 2024-11-25T17:08:54,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f948c474b445440ebb6094b24b9f7512 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f948c474b445440ebb6094b24b9f7512 2024-11-25T17:08:54,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/db8277f502cc4128935fd7399c49b43f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/db8277f502cc4128935fd7399c49b43f 2024-11-25T17:08:54,609 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/34c3d4a51e0e4b9ab2ef6fc0dd103720 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/34c3d4a51e0e4b9ab2ef6fc0dd103720 2024-11-25T17:08:54,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/1f0e3f841cfb45348ef675fa30203595 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/1f0e3f841cfb45348ef675fa30203595 2024-11-25T17:08:54,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/b9e0f97ae1984af6b58ef9b7be3436e8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/b9e0f97ae1984af6b58ef9b7be3436e8 2024-11-25T17:08:54,616 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/recovered.edits/358.seqid, newMaxSeqId=358, maxSeqId=4 2024-11-25T17:08:54,616 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f. 
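Editor's note: each relocation is recorded as "Archived from <kind>, <source> to <destination>". A small scraper like the one below (plain JDK, all names invented for illustration) can pull the source/destination pairs out of such a capture, for example to confirm that every store file of the closed region ended up under /archive.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical log scraper for the "Archived from ..." lines shown above.
public final class ArchiveLogScanner {

    // Matches: "Archived from FileableStoreFile, <src> to <dst>" (also FileablePath).
    private static final Pattern ARCHIVED =
        Pattern.compile("Archived from \\w+, (\\S+) to (\\S+)");

    public static void main(String[] args) {
        String line = "backup.HFileArchiver(596): Archived from FileableStoreFile, "
            + "hdfs://localhost:41117/example/data/default/T/r/C/587f8d0b39eb47bbb74680fdc07beb02 to "
            + "hdfs://localhost:41117/example/archive/data/default/T/r/C/587f8d0b39eb47bbb74680fdc07beb02";
        Matcher m = ARCHIVED.matcher(line);
        if (m.find()) {
            System.out.println("source      = " + m.group(1));
            System.out.println("destination = " + m.group(2));
        }
    }
}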
2024-11-25T17:08:54,616 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for c60040e46a02e10beaa963566dc1e39f: 2024-11-25T17:08:54,618 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,618 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=c60040e46a02e10beaa963566dc1e39f, regionState=CLOSED 2024-11-25T17:08:54,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-25T17:08:54,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure c60040e46a02e10beaa963566dc1e39f, server=6579369734b6,41865,1732554474464 in 802 msec 2024-11-25T17:08:54,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-25T17:08:54,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c60040e46a02e10beaa963566dc1e39f, UNASSIGN in 806 msec 2024-11-25T17:08:54,622 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-25T17:08:54,622 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 808 msec 2024-11-25T17:08:54,624 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554534623"}]},"ts":"1732554534623"} 2024-11-25T17:08:54,624 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-25T17:08:54,626 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-25T17:08:54,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 822 msec 2024-11-25T17:08:54,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-25T17:08:54,911 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-25T17:08:54,911 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-25T17:08:54,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:54,912 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:54,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-25T17:08:54,913 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, 
state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:54,915 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,917 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/recovered.edits] 2024-11-25T17:08:54,920 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/65b88316c66348ec96bb4137d68c21d3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/65b88316c66348ec96bb4137d68c21d3 2024-11-25T17:08:54,921 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/65f08247c9a449ceb6a910104097cb60 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/65f08247c9a449ceb6a910104097cb60 2024-11-25T17:08:54,922 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d22fc96b10ee45888e5b8b6c27bdb5ac to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/A/d22fc96b10ee45888e5b8b6c27bdb5ac 2024-11-25T17:08:54,924 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/40d5d2322a034ba387fd0fd2ecd60f73 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/40d5d2322a034ba387fd0fd2ecd60f73 2024-11-25T17:08:54,925 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bf3d5d920273424491a7ba8467ff0737 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/bf3d5d920273424491a7ba8467ff0737 2024-11-25T17:08:54,926 DEBUG 
[HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/ec5c275ab00a4761930a89cfb96bc31f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/B/ec5c275ab00a4761930a89cfb96bc31f 2024-11-25T17:08:54,929 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/3886c311c52c4331bfb551ceaecf8517 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/3886c311c52c4331bfb551ceaecf8517 2024-11-25T17:08:54,930 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/91cc5f0b28f240768dde7293853540d9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/91cc5f0b28f240768dde7293853540d9 2024-11-25T17:08:54,931 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f6bb62e53b184f4289aa94bfeff81014 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/C/f6bb62e53b184f4289aa94bfeff81014 2024-11-25T17:08:54,933 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/recovered.edits/358.seqid to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f/recovered.edits/358.seqid 2024-11-25T17:08:54,933 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,933 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-25T17:08:54,934 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-25T17:08:54,935 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-25T17:08:54,938 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112500373dc86e9548c09333b75cf391093f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112500373dc86e9548c09333b75cf391093f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,939 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112519b39cc3117946708e2a8dfffcb37a1f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112519b39cc3117946708e2a8dfffcb37a1f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,941 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112520a75e5f75bc486990ff14dcba824fa9_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112520a75e5f75bc486990ff14dcba824fa9_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,942 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411252dd411ced94f42bb83c9a90f0d672a47_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411252dd411ced94f42bb83c9a90f0d672a47_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,943 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112530f43586dbe148f0a446ec988187ce4f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112530f43586dbe148f0a446ec988187ce4f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,944 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112566ee7ec363484ba7a22f5b1fc730a746_c60040e46a02e10beaa963566dc1e39f to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112566ee7ec363484ba7a22f5b1fc730a746_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,945 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125708dbb64666a4d69988077b908882380_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125708dbb64666a4d69988077b908882380_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,946 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125723c34a0657a46b2a932695b562326ab_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125723c34a0657a46b2a932695b562326ab_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,948 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411257988bec819f643c098fca8c29fdf3aee_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411257988bec819f643c098fca8c29fdf3aee_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,949 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125807f81f1441746d7b8c732d3022a0842_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125807f81f1441746d7b8c732d3022a0842_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,950 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112595a5405a32a644c99d99ceac98108159_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112595a5405a32a644c99d99ceac98108159_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,951 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a45d81c9e7e14f43bc4dbad852b8eac6_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a45d81c9e7e14f43bc4dbad852b8eac6_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,953 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a94284a7bda24245860206048d20ca78_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a94284a7bda24245860206048d20ca78_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,954 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125acea707c09e04a799453ed21eb00a1c0_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125acea707c09e04a799453ed21eb00a1c0_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,955 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ad4eef92d8be4650bb57050356c6815b_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ad4eef92d8be4650bb57050356c6815b_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,957 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b546b59465ba456190f4ac2bcc6fa678_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b546b59465ba456190f4ac2bcc6fa678_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,958 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e93f3f6c279c47e1b3b46c94b5362f8f_c60040e46a02e10beaa963566dc1e39f to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e93f3f6c279c47e1b3b46c94b5362f8f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,960 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ef89400ffc4e4613915a05fcdd008768_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ef89400ffc4e4613915a05fcdd008768_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,961 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125efe66a127d184ec7bc053c1d41c2c39f_c60040e46a02e10beaa963566dc1e39f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125efe66a127d184ec7bc053c1d41c2c39f_c60040e46a02e10beaa963566dc1e39f 2024-11-25T17:08:54,962 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-25T17:08:54,964 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:54,967 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-25T17:08:54,969 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-25T17:08:54,970 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:54,970 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-25T17:08:54,970 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732554534970"}]},"ts":"9223372036854775807"} 2024-11-25T17:08:54,972 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-25T17:08:54,972 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c60040e46a02e10beaa963566dc1e39f, NAME => 'TestAcidGuarantees,,1732554506632.c60040e46a02e10beaa963566dc1e39f.', STARTKEY => '', ENDKEY => ''}] 2024-11-25T17:08:54,972 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
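Editor's note: procedures 63 (DisableTableProcedure) and 67 (DeleteTableProcedure) in this stretch correspond to the usual client-side sequence of disabling a table and then deleting it. The following is a minimal sketch of that sequence with the HBase client Admin API, assuming a reachable cluster configured via HBaseConfiguration; error handling is omitted and the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: disable, then delete, a table (the same two steps the
// DisableTableProcedure/DeleteTableProcedure entries above carry out server-side).
public final class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes cluster settings on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(table)) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // returns once the DISABLE procedure completes
                }
                admin.deleteTable(table);        // returns once the DELETE procedure completes
            }
        }
    }
}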
2024-11-25T17:08:54,972 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732554534972"}]},"ts":"9223372036854775807"} 2024-11-25T17:08:54,973 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-25T17:08:54,976 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:54,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 65 msec 2024-11-25T17:08:55,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-25T17:08:55,014 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-25T17:08:55,023 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=242 (was 238) Potentially hanging thread: hconnection-0x327ba634-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x327ba634-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x327ba634-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400252136_22 at /127.0.0.1:49064 [Waiting for operation #1070] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x327ba634-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1427976722_22 at /127.0.0.1:56088 [Waiting for operation #724] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1400252136_22 at /127.0.0.1:36936 [Waiting for operation #693] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1427976722_22 at /127.0.0.1:36966 [Waiting for operation #620] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=463 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=641 (was 650), ProcessCount=11 (was 11), AvailableMemoryMB=2546 (was 2214) - AvailableMemoryMB LEAK? - 2024-11-25T17:08:55,031 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=242, OpenFileDescriptor=463, MaxFileDescriptor=1048576, SystemLoadAverage=641, ProcessCount=11, AvailableMemoryMB=2546 2024-11-25T17:08:55,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
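For context on the TableDescriptorChecker warning above: the table was created with MEMSTORE_FLUSHSIZE = 131072 bytes (128 KB), far below the 128 MB default, which is what triggers the "too small, which might cause very frequent flushing" message. A minimal sketch of how a descriptor could end up with that value using the standard HBase 2.x client API (the table name and the 131072-byte figure come from the log; the code itself is illustrative, not the test's own source):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeDescriptor {
  public static TableDescriptor build() {
    // 131072 bytes = 128 KB, the value reported by TableDescriptorChecker above.
    long flushSize = 128L * 1024;
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        // Overrides "hbase.hregion.memstore.flush.size" for this table only.
        .setMemStoreFlushSize(flushSize)
        .build();
  }
}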
2024-11-25T17:08:55,033 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:08:55,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-25T17:08:55,034 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T17:08:55,034 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:55,035 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-11-25T17:08:55,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-25T17:08:55,035 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T17:08:55,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742055_1231 (size=963) 2024-11-25T17:08:55,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-25T17:08:55,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-25T17:08:55,443 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:08:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742056_1232 (size=53) 2024-11-25T17:08:55,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-25T17:08:55,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:08:55,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 26bdcc7959673ac8abf209b84227d813, disabling compactions & flushes 2024-11-25T17:08:55,854 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:55,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:55,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. after waiting 0 ms 2024-11-25T17:08:55,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:55,854 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
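The create request logged above ('TestAcidGuarantees' with TABLE_ATTRIBUTES METADATA 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and families A, B, C with VERSIONS => '1') corresponds roughly to the following HBase 2.x Admin call. This is a hedged reconstruction from the logged descriptor, not the test's actual code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidGuaranteesTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata seen in the log; selects the ADAPTIVE
              // CompactingMemStore for every store of the region.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                .build());
      }
      admin.createTable(builder.build());
    }
  }
}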
2024-11-25T17:08:55,854 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:55,855 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T17:08:55,855 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732554535855"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732554535855"}]},"ts":"1732554535855"} 2024-11-25T17:08:55,856 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-25T17:08:55,857 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T17:08:55,857 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554535857"}]},"ts":"1732554535857"} 2024-11-25T17:08:55,858 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-25T17:08:55,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=26bdcc7959673ac8abf209b84227d813, ASSIGN}] 2024-11-25T17:08:55,863 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=26bdcc7959673ac8abf209b84227d813, ASSIGN 2024-11-25T17:08:55,864 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=26bdcc7959673ac8abf209b84227d813, ASSIGN; state=OFFLINE, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=false 2024-11-25T17:08:56,014 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=26bdcc7959673ac8abf209b84227d813, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:56,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:08:56,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-25T17:08:56,167 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:56,170 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
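The repeated "Checking to see if procedure is done pid=68" lines are the master-side view of the client polling its create-table future (the HBaseAdmin$TableFuture completion for procId 68 is logged a little further down). On the client side that pattern looks roughly like the sketch below; createTableAsync is the standard Admin API, while the timeout and wrapper method are assumptions about how a test driver might wait:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class WaitForCreate {
  // Submits the create-table procedure and blocks until the master reports it done,
  // which is what drives the periodic "Checking to see if procedure is done" calls.
  static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
    Future<Void> future = admin.createTableAsync(td);
    future.get(5, TimeUnit.MINUTES); // completes once the CreateTableProcedure reaches SUCCESS
  }
}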
2024-11-25T17:08:56,170 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:08:56,170 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,170 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:08:56,170 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,170 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,172 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,173 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:56,173 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26bdcc7959673ac8abf209b84227d813 columnFamilyName A 2024-11-25T17:08:56,173 DEBUG [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:56,174 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.HStore(327): Store=26bdcc7959673ac8abf209b84227d813/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:56,174 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,175 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:56,175 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26bdcc7959673ac8abf209b84227d813 columnFamilyName B 2024-11-25T17:08:56,175 DEBUG [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:56,175 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.HStore(327): Store=26bdcc7959673ac8abf209b84227d813/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:56,175 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,176 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:08:56,176 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26bdcc7959673ac8abf209b84227d813 columnFamilyName C 2024-11-25T17:08:56,176 DEBUG [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:08:56,177 INFO [StoreOpener-26bdcc7959673ac8abf209b84227d813-1 {}] regionserver.HStore(327): Store=26bdcc7959673ac8abf209b84227d813/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:08:56,177 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:56,177 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,178 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,179 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:08:56,180 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:56,182 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:08:56,182 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 26bdcc7959673ac8abf209b84227d813; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72098104, jitterRate=0.07434546947479248}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:08:56,183 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:56,183 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., pid=70, masterSystemTime=1732554536166 2024-11-25T17:08:56,185 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:56,185 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
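The store-open lines above show each family (A/B/C) coming up with a CompactingMemStore and compactor=ADAPTIVE, driven by the table-level metadata set at creation time. For comparison, the same policy can also be requested per column family through the descriptor builder; a small sketch of that alternative (illustrative only, not how this particular test configured it):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamily {
  // Requests ADAPTIVE in-memory compaction for a single family instead of
  // setting hbase.hregion.compacting.memstore.type on the whole table.
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .setMaxVersions(1)
        .build();
  }
}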
2024-11-25T17:08:56,185 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=26bdcc7959673ac8abf209b84227d813, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:08:56,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-25T17:08:56,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 in 171 msec 2024-11-25T17:08:56,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-25T17:08:56,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=26bdcc7959673ac8abf209b84227d813, ASSIGN in 325 msec 2024-11-25T17:08:56,189 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T17:08:56,189 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554536189"}]},"ts":"1732554536189"} 2024-11-25T17:08:56,190 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-25T17:08:56,192 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T17:08:56,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1590 sec 2024-11-25T17:08:57,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-25T17:08:57,139 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-11-25T17:08:57,141 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f810aa9 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b70f48f 2024-11-25T17:08:57,144 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f66057f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,146 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,147 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,148 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T17:08:57,149 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43216, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T17:08:57,152 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64dc42d9 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58341641 2024-11-25T17:08:57,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17b6adc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,156 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1ac389 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44645c55 2024-11-25T17:08:57,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669e1999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,163 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-11-25T17:08:57,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,167 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-11-25T17:08:57,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,171 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-11-25T17:08:57,175 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,176 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03a703d2 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17cf7fc0 2024-11-25T17:08:57,179 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560ec309, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,180 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-11-25T17:08:57,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,184 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72537a47 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@88aa519 2024-11-25T17:08:57,186 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e575aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,187 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-11-25T17:08:57,190 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,191 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-11-25T17:08:57,193 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:08:57,196 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:08:57,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-25T17:08:57,198 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:08:57,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-25T17:08:57,198 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=71, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:08:57,198 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:08:57,201 DEBUG [hconnection-0x63a19510-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,201 DEBUG [hconnection-0xa23afe1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,201 DEBUG [hconnection-0x7ba186cb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,202 DEBUG [hconnection-0x67406ff5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,202 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,202 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,203 DEBUG [hconnection-0x74436fc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,203 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,203 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,203 DEBUG [hconnection-0x6458167e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,204 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36560, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,204 DEBUG [hconnection-0x5cef1a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,205 DEBUG [hconnection-0x18216bb1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,205 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,206 DEBUG [hconnection-0x4142f836-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,206 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,206 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36586, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,208 DEBUG [hconnection-0x5c6c801e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:08:57,208 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,209 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:08:57,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:57,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:08:57,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:08:57,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:57,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:08:57,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:57,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:08:57,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:57,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/c39421fcbd9e441998fc94cd7366f63a is 50, key is test_row_0/A:col10/1732554537212/Put/seqid=0 2024-11-25T17:08:57,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742057_1233 (size=14341) 2024-11-25T17:08:57,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-25T17:08:57,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554597317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554597318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554597319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554597319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554597323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,358 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:57,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:57,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:57,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554597429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554597429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554597429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554597434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554597435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-25T17:08:57,514 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:57,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:57,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:57,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554597633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554597633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554597635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554597636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554597637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:57,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/c39421fcbd9e441998fc94cd7366f63a 2024-11-25T17:08:57,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:57,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:57,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:08:57,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/982138270feb439d8b670e8820f70b70 is 50, key is test_row_0/B:col10/1732554537212/Put/seqid=0 2024-11-25T17:08:57,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742058_1234 (size=12001) 2024-11-25T17:08:57,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/982138270feb439d8b670e8820f70b70 2024-11-25T17:08:57,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3fef88347fa84100a4b4f009b7067376 is 50, key is test_row_0/C:col10/1732554537212/Put/seqid=0 2024-11-25T17:08:57,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742059_1235 (size=12001) 2024-11-25T17:08:57,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3fef88347fa84100a4b4f009b7067376 2024-11-25T17:08:57,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 
2024-11-25T17:08:57,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/c39421fcbd9e441998fc94cd7366f63a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/c39421fcbd9e441998fc94cd7366f63a 2024-11-25T17:08:57,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/c39421fcbd9e441998fc94cd7366f63a, entries=200, sequenceid=13, filesize=14.0 K 2024-11-25T17:08:57,815 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-25T17:08:57,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/982138270feb439d8b670e8820f70b70 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/982138270feb439d8b670e8820f70b70 2024-11-25T17:08:57,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/982138270feb439d8b670e8820f70b70, entries=150, sequenceid=13, filesize=11.7 K 2024-11-25T17:08:57,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:57,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:57,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:57,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3fef88347fa84100a4b4f009b7067376 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3fef88347fa84100a4b4f009b7067376 2024-11-25T17:08:57,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3fef88347fa84100a4b4f009b7067376, entries=150, sequenceid=13, filesize=11.7 K 2024-11-25T17:08:57,844 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-25T17:08:57,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 26bdcc7959673ac8abf209b84227d813 in 604ms, sequenceid=13, compaction requested=false 2024-11-25T17:08:57,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:57,911 DEBUG [master/6579369734b6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 44c7b6d5dcb77061152173d1606a877a changed from -1.0 to 0.0, refreshing cache 2024-11-25T17:08:57,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:08:57,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:57,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:08:57,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:57,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:08:57,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:57,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:08:57,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:57,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/62ee8182335b4cd2af104575855a9a9f is 50, key is test_row_0/A:col10/1732554537317/Put/seqid=0 2024-11-25T17:08:57,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554597949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554597949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554597950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:57,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554597950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554597951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:57,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742060_1236 (size=14341) 2024-11-25T17:08:57,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/62ee8182335b4cd2af104575855a9a9f 2024-11-25T17:08:57,982 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:57,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:57,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:57,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:57,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:08:57,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:57,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/38da297267754c15b98cbdc9e07ac37d is 50, key is test_row_0/B:col10/1732554537317/Put/seqid=0 2024-11-25T17:08:58,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742061_1237 (size=12001) 2024-11-25T17:08:58,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/38da297267754c15b98cbdc9e07ac37d 2024-11-25T17:08:58,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/5609f2c860ba4980a490458cfc3e4d07 is 50, key is test_row_0/C:col10/1732554537317/Put/seqid=0 2024-11-25T17:08:58,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554598056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554598062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554598070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554598076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554598076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742062_1238 (size=12001) 2024-11-25T17:08:58,136 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:58,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:58,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:58,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:58,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554598259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554598265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554598274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554598281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,289 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:58,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:58,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:58,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554598290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:58,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-25T17:08:58,453 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:58,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:58,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/5609f2c860ba4980a490458cfc3e4d07 2024-11-25T17:08:58,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/62ee8182335b4cd2af104575855a9a9f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/62ee8182335b4cd2af104575855a9a9f 2024-11-25T17:08:58,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/62ee8182335b4cd2af104575855a9a9f, entries=200, sequenceid=39, filesize=14.0 K 2024-11-25T17:08:58,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/38da297267754c15b98cbdc9e07ac37d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/38da297267754c15b98cbdc9e07ac37d 2024-11-25T17:08:58,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/38da297267754c15b98cbdc9e07ac37d, entries=150, sequenceid=39, filesize=11.7 K 2024-11-25T17:08:58,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/5609f2c860ba4980a490458cfc3e4d07 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5609f2c860ba4980a490458cfc3e4d07 2024-11-25T17:08:58,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5609f2c860ba4980a490458cfc3e4d07, entries=150, sequenceid=39, filesize=11.7 K 2024-11-25T17:08:58,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 26bdcc7959673ac8abf209b84227d813 in 618ms, sequenceid=39, compaction requested=false 2024-11-25T17:08:58,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:58,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:58,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:08:58,566 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:08:58,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:58,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:08:58,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:58,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:08:58,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:58,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/b5fbae94e51a47749469d6e4ae8550ad is 50, key is test_row_0/A:col10/1732554537950/Put/seqid=0 2024-11-25T17:08:58,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742063_1239 (size=12001) 2024-11-25T17:08:58,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554598596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554598597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554598599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554598600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:58,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554598600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:58,614 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:58,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:58,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:58,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:58,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:58,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554598702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554598702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554598704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554598704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554598705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,768 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:58,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-25T17:08:58,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing
2024-11-25T17:08:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:58,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:58,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:58,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554598905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554598908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554598909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554598909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:58,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554598913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:58,929 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:58,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-25T17:08:58,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:58,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing
2024-11-25T17:08:58,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:58,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:58,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:58,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:58,991 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/b5fbae94e51a47749469d6e4ae8550ad
2024-11-25T17:08:58,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/75bfe25871474675a4f9481316d9a0ea is 50, key is test_row_0/B:col10/1732554537950/Put/seqid=0
2024-11-25T17:08:59,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742064_1240 (size=12001)
2024-11-25T17:08:59,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:59,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-25T17:08:59,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:59,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing
2024-11-25T17:08:59,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:59,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:59,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:59,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:59,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554599210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:59,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:59,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554599212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:59,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:59,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554599214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:59,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554599214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:59,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-25T17:08:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554599219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464
2024-11-25T17:08:59,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:59,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-25T17:08:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:59,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing
2024-11-25T17:08:59,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:59,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:59,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:59,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:59,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-11-25T17:08:59,392 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:08:59,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72
2024-11-25T17:08:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing
2024-11-25T17:08:59,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.
2024-11-25T17:08:59,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:08:59,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72
java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:59,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:59,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/75bfe25871474675a4f9481316d9a0ea 2024-11-25T17:08:59,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/19989c5b70a848c2a9d3abf60f2cf6b6 is 50, key is test_row_0/C:col10/1732554537950/Put/seqid=0 2024-11-25T17:08:59,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742065_1241 (size=12001) 2024-11-25T17:08:59,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/19989c5b70a848c2a9d3abf60f2cf6b6 2024-11-25T17:08:59,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/b5fbae94e51a47749469d6e4ae8550ad as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/b5fbae94e51a47749469d6e4ae8550ad 2024-11-25T17:08:59,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/b5fbae94e51a47749469d6e4ae8550ad, entries=150, sequenceid=51, filesize=11.7 K 2024-11-25T17:08:59,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/75bfe25871474675a4f9481316d9a0ea as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/75bfe25871474675a4f9481316d9a0ea 2024-11-25T17:08:59,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/75bfe25871474675a4f9481316d9a0ea, entries=150, sequenceid=51, filesize=11.7 K 2024-11-25T17:08:59,550 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:59,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/19989c5b70a848c2a9d3abf60f2cf6b6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/19989c5b70a848c2a9d3abf60f2cf6b6 2024-11-25T17:08:59,550 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:59,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:59,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:59,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:59,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:08:59,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:08:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
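
The repeated pid=72 failures above come from the master re-dispatching FlushRegionCallable while the region is still mid-flush; the callable finds the region "already flushing", reports "Unable to complete flush", and the master keeps retrying until the in-progress flush completes. As a rough, hypothetical illustration only (not part of this test run), a client can request the same kind of table flush through the HBase Admin API; the configuration and table name here are placeholders taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush all stores of the table; in HBase 2.x this is
          // driven by a flush procedure of the same kind tracked above as pid=72.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
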
2024-11-25T17:08:59,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/19989c5b70a848c2a9d3abf60f2cf6b6, entries=150, sequenceid=51, filesize=11.7 K 2024-11-25T17:08:59,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 26bdcc7959673ac8abf209b84227d813 in 1007ms, sequenceid=51, compaction requested=true 2024-11-25T17:08:59,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:59,572 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:59,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:08:59,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:59,573 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:59,574 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:59,574 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:08:59,574 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
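
The "Exploring compaction algorithm has selected 3 files ..." lines above report HBase's ratio-based store-file selection: it looks for a run of adjacent store files in which no single file is larger than the compaction ratio times the combined size of the other files in the run. The sketch below is a deliberately simplified, hypothetical illustration of that ratio check, not the actual ExploringCompactionPolicy code; the file sizes and the 1.2 ratio are made-up values roughly matching the ~12-14 KB flush files in this log:

    import java.util.ArrayList;
    import java.util.List;

    public class RatioSelectionSketch {
      /** Returns true if every file in sizes[start..end] satisfies the ratio rule. */
      static boolean withinRatio(long[] sizes, int start, int end, double ratio) {
        long total = 0;
        for (int i = start; i <= end; i++) {
          total += sizes[i];
        }
        for (int i = start; i <= end; i++) {
          // A file may be at most `ratio` times the sum of the other files in the run.
          if (sizes[i] > ratio * (total - sizes[i])) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        long[] sizes = {14336, 14336, 12001}; // illustrative sizes in bytes
        List<int[]> runsInRatio = new ArrayList<>();
        // Enumerate every contiguous run of at least three files and keep those in ratio.
        for (int start = 0; start < sizes.length; start++) {
          for (int end = start + 2; end < sizes.length; end++) {
            if (withinRatio(sizes, start, end, 1.2)) {
              runsInRatio.add(new int[] {start, end});
            }
          }
        }
        System.out.println("runs in ratio: " + runsInRatio.size()); // prints 1
      }
    }
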
2024-11-25T17:08:59,574 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/982138270feb439d8b670e8820f70b70, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/38da297267754c15b98cbdc9e07ac37d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/75bfe25871474675a4f9481316d9a0ea] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=35.2 K 2024-11-25T17:08:59,575 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:59,575 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:08:59,575 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:08:59,575 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/c39421fcbd9e441998fc94cd7366f63a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/62ee8182335b4cd2af104575855a9a9f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/b5fbae94e51a47749469d6e4ae8550ad] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=39.7 K 2024-11-25T17:08:59,576 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c39421fcbd9e441998fc94cd7366f63a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732554537210 2024-11-25T17:08:59,576 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 982138270feb439d8b670e8820f70b70, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732554537210 2024-11-25T17:08:59,576 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 38da297267754c15b98cbdc9e07ac37d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732554537317 2024-11-25T17:08:59,576 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62ee8182335b4cd2af104575855a9a9f, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732554537316 2024-11-25T17:08:59,577 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting b5fbae94e51a47749469d6e4ae8550ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732554537945 2024-11-25T17:08:59,577 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 75bfe25871474675a4f9481316d9a0ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732554537945 2024-11-25T17:08:59,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:08:59,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:59,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:08:59,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:59,591 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#197 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:59,592 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/1b6fac69135a441e935e55b9607158e2 is 50, key is test_row_0/A:col10/1732554537950/Put/seqid=0 2024-11-25T17:08:59,593 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#198 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:59,594 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/ed7434604c924bfdb0e3d60c5ccbe2b7 is 50, key is test_row_0/B:col10/1732554537950/Put/seqid=0 2024-11-25T17:08:59,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742066_1242 (size=12104) 2024-11-25T17:08:59,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742067_1243 (size=12104) 2024-11-25T17:08:59,665 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/1b6fac69135a441e935e55b9607158e2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1b6fac69135a441e935e55b9607158e2 2024-11-25T17:08:59,672 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 1b6fac69135a441e935e55b9607158e2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:59,672 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:59,672 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=13, startTime=1732554539572; duration=0sec 2024-11-25T17:08:59,672 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:08:59,672 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:08:59,672 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:08:59,674 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:08:59,674 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:08:59,674 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
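
The PressureAwareThroughputController lines above ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") show the compactions staying comfortably under the configured write-throughput cap, so no throttling sleeps were needed. A minimal sketch of the arithmetic behind such a cap follows; the 50 MB/s limit matches the log, but the byte counts, elapsed time, and method names are illustrative only and not HBase's implementation:

    public class ThroughputCapSketch {
      /**
       * How long (ms) a writer should sleep after producing bytesWritten bytes in
       * elapsedMs milliseconds so the average rate stays at or below limitBytesPerSec.
       */
      static long sleepNeededMs(long bytesWritten, long elapsedMs, long limitBytesPerSec) {
        // Time the write *should* have taken at the capped rate.
        long minDurationMs = (bytesWritten * 1000L) / limitBytesPerSec;
        return Math.max(0L, minDurationMs - elapsedMs);
      }

      public static void main(String[] args) {
        long limit = 50L * 1024 * 1024; // 50 MB/second, as in the log
        long written = 36003;           // ~35.2 KB of compaction input, as in the log
        long elapsed = 5;               // illustrative elapsed time in ms
        // Well under the cap, so no sleep is required -- matching "slept 0 time(s)".
        System.out.println("sleep ms = " + sleepNeededMs(written, elapsed, limit));
      }
    }
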
2024-11-25T17:08:59,674 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3fef88347fa84100a4b4f009b7067376, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5609f2c860ba4980a490458cfc3e4d07, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/19989c5b70a848c2a9d3abf60f2cf6b6] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=35.2 K 2024-11-25T17:08:59,675 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fef88347fa84100a4b4f009b7067376, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732554537210 2024-11-25T17:08:59,675 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5609f2c860ba4980a490458cfc3e4d07, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732554537317 2024-11-25T17:08:59,675 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19989c5b70a848c2a9d3abf60f2cf6b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732554537945 2024-11-25T17:08:59,681 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/ed7434604c924bfdb0e3d60c5ccbe2b7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/ed7434604c924bfdb0e3d60c5ccbe2b7 2024-11-25T17:08:59,688 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into ed7434604c924bfdb0e3d60c5ccbe2b7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
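
Both the flushes and the compactions in this run first write their output under the region's .tmp directory and then commit it by moving it into the store directory (the "Committing .../.tmp/A/... as .../A/..." lines). The sketch below shows that general write-then-rename pattern using the Hadoop FileSystem API; the paths are placeholders and this is not the HRegionFileSystem implementation itself:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(); // assumes fs.defaultFS points at the test HDFS
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/example-hfile");
        Path storeFile = new Path("/data/default/TestAcidGuarantees/region/A/example-hfile");

        // 1. Write the new file somewhere readers never look.
        try (FSDataOutputStream out = fs.create(tmpFile)) {
          out.writeBytes("placeholder contents");
        }

        // 2. Commit it with a rename; on HDFS a rename is atomic, so readers see
        //    either the old file set or the new one, never a partially written file.
        fs.mkdirs(storeFile.getParent());
        if (!fs.rename(tmpFile, storeFile)) {
          throw new IOException("commit failed for " + storeFile);
        }
      }
    }
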
2024-11-25T17:08:59,688 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:59,688 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=13, startTime=1732554539573; duration=0sec 2024-11-25T17:08:59,688 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:59,688 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:08:59,691 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#199 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:08:59,691 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c8332d5bd00049a68f8161dcac5d752e is 50, key is test_row_0/C:col10/1732554537950/Put/seqid=0 2024-11-25T17:08:59,708 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:08:59,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-25T17:08:59,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:08:59,709 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:08:59,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:08:59,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:59,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:08:59,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:59,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:08:59,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:08:59,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742068_1244 (size=12104) 2024-11-25T17:08:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:08:59,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:08:59,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/d114e9c7b9c840b48840b67ae31596f9 is 50, key is test_row_0/A:col10/1732554538598/Put/seqid=0 2024-11-25T17:08:59,740 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c8332d5bd00049a68f8161dcac5d752e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c8332d5bd00049a68f8161dcac5d752e 2024-11-25T17:08:59,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554599745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554599749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554599763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742069_1245 (size=12001) 2024-11-25T17:08:59,793 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into c8332d5bd00049a68f8161dcac5d752e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:08:59,793 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:08:59,793 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=13, startTime=1732554539588; duration=0sec 2024-11-25T17:08:59,794 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:08:59,794 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:08:59,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554599794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554599801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554599873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554599873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554599874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554599906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:08:59,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:08:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554599917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554600078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554600078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554600078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554600108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554600120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,180 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/d114e9c7b9c840b48840b67ae31596f9 2024-11-25T17:09:00,189 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T17:09:00,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/2e10103dbade4fa6907aa2301f975daf is 50, key is test_row_0/B:col10/1732554538598/Put/seqid=0 2024-11-25T17:09:00,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742070_1246 (size=12001) 2024-11-25T17:09:00,213 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/2e10103dbade4fa6907aa2301f975daf 2024-11-25T17:09:00,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/ecf8c24509c342abb3c4ccef6408c97e is 50, key is test_row_0/C:col10/1732554538598/Put/seqid=0 2024-11-25T17:09:00,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742071_1247 (size=12001) 2024-11-25T17:09:00,253 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/ecf8c24509c342abb3c4ccef6408c97e 2024-11-25T17:09:00,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/d114e9c7b9c840b48840b67ae31596f9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/d114e9c7b9c840b48840b67ae31596f9 2024-11-25T17:09:00,264 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/d114e9c7b9c840b48840b67ae31596f9, entries=150, sequenceid=78, filesize=11.7 K 2024-11-25T17:09:00,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/2e10103dbade4fa6907aa2301f975daf as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/2e10103dbade4fa6907aa2301f975daf 2024-11-25T17:09:00,270 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/2e10103dbade4fa6907aa2301f975daf, entries=150, sequenceid=78, filesize=11.7 K 2024-11-25T17:09:00,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/ecf8c24509c342abb3c4ccef6408c97e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/ecf8c24509c342abb3c4ccef6408c97e 2024-11-25T17:09:00,275 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/ecf8c24509c342abb3c4ccef6408c97e, entries=150, sequenceid=78, filesize=11.7 K 2024-11-25T17:09:00,276 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 26bdcc7959673ac8abf209b84227d813 in 567ms, sequenceid=78, compaction requested=false 2024-11-25T17:09:00,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 
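The repeated RegionTooBusyException entries above are the region server refusing new writes while the memstore for region 26bdcc7959673ac8abf209b84227d813 sits over its blocking limit (512.0 K here; the threshold is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), and the flush that follows is what brings the region back under that limit. A minimal client-side sketch of how a writer might absorb the condition is shown below; the table, row, and column names simply mirror the TestAcidGuarantees entries in the log, the backoff values are illustrative, and the stock HBase client normally performs equivalent retries on its own (governed by hbase.client.retries.number and hbase.client.pause).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                      // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                    // rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    // The server is flushing; back off and let it catch up before retrying.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}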
2024-11-25T17:09:00,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:00,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-25T17:09:00,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-25T17:09:00,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-25T17:09:00,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0880 sec 2024-11-25T17:09:00,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 3.0920 sec 2024-11-25T17:09:00,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:00,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:09:00,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:00,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:00,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:00,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:00,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:00,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:00,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/34e469da643d4bd29c0c52d903cc8d9d is 50, key is test_row_0/A:col10/1732554540384/Put/seqid=0 2024-11-25T17:09:00,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742072_1248 (size=12001) 2024-11-25T17:09:00,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554600415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554600417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554600421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554600422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554600425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554600522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554600522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554600530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554600724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554600725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554600737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/34e469da643d4bd29c0c52d903cc8d9d 2024-11-25T17:09:00,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/dfe60920977d412a92c274de6b4d63eb is 50, key is test_row_0/B:col10/1732554540384/Put/seqid=0 2024-11-25T17:09:00,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742073_1249 (size=12001) 2024-11-25T17:09:00,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554600928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:00,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:00,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554600930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554601027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554601028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554601040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,222 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/dfe60920977d412a92c274de6b4d63eb 2024-11-25T17:09:01,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/66c8eb88b63842adb7c371ab20722732 is 50, key is test_row_0/C:col10/1732554540384/Put/seqid=0 2024-11-25T17:09:01,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742074_1250 (size=12001) 2024-11-25T17:09:01,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/66c8eb88b63842adb7c371ab20722732 2024-11-25T17:09:01,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/34e469da643d4bd29c0c52d903cc8d9d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/34e469da643d4bd29c0c52d903cc8d9d 2024-11-25T17:09:01,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/34e469da643d4bd29c0c52d903cc8d9d, entries=150, sequenceid=94, filesize=11.7 K 2024-11-25T17:09:01,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/dfe60920977d412a92c274de6b4d63eb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/dfe60920977d412a92c274de6b4d63eb 2024-11-25T17:09:01,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/dfe60920977d412a92c274de6b4d63eb, entries=150, sequenceid=94, filesize=11.7 K 2024-11-25T17:09:01,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/66c8eb88b63842adb7c371ab20722732 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/66c8eb88b63842adb7c371ab20722732 2024-11-25T17:09:01,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/66c8eb88b63842adb7c371ab20722732, entries=150, sequenceid=94, filesize=11.7 K 2024-11-25T17:09:01,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 26bdcc7959673ac8abf209b84227d813 in 883ms, sequenceid=94, compaction requested=true 2024-11-25T17:09:01,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:01,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:01,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:01,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:01,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:01,267 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:01,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:01,267 DEBUG 
[RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:01,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:01,268 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:01,268 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:01,268 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:01,268 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/ed7434604c924bfdb0e3d60c5ccbe2b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/2e10103dbade4fa6907aa2301f975daf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/dfe60920977d412a92c274de6b4d63eb] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=35.3 K 2024-11-25T17:09:01,269 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:01,269 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:01,269 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
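The compaction selection above ("3 store files, 0 compacting, 3 eligible, 16 blocking") is the ExploringCompactionPolicy concluding that the three freshly flushed files per store are worth a minor compaction. As a rough orientation only, the two numbers correspond to standard HBase settings; the sketch below reads them with their usual defaults rather than whatever this test harness actually configured.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionKnobsSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is selected;
        // the "3 eligible" files in the log meet the usual default of 3.
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        // Store-file count at which further memstore flushes are blocked;
        // the "16 blocking" figure in the log matches this setting's usual default.
        int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min=" + minFiles + ", blockingStoreFiles=" + blockingFiles);
    }
}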
2024-11-25T17:09:01,269 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1b6fac69135a441e935e55b9607158e2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/d114e9c7b9c840b48840b67ae31596f9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/34e469da643d4bd29c0c52d903cc8d9d] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=35.3 K 2024-11-25T17:09:01,269 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ed7434604c924bfdb0e3d60c5ccbe2b7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732554537945 2024-11-25T17:09:01,269 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b6fac69135a441e935e55b9607158e2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732554537945 2024-11-25T17:09:01,270 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting d114e9c7b9c840b48840b67ae31596f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554538597 2024-11-25T17:09:01,270 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e10103dbade4fa6907aa2301f975daf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554538597 2024-11-25T17:09:01,270 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34e469da643d4bd29c0c52d903cc8d9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732554539751 2024-11-25T17:09:01,271 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting dfe60920977d412a92c274de6b4d63eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732554539751 2024-11-25T17:09:01,291 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#206 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:01,291 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/8d7abe798d6745d1acc12db6cc0de79f is 50, key is test_row_0/B:col10/1732554540384/Put/seqid=0 2024-11-25T17:09:01,295 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#207 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:01,296 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/79c5c54f646545118f6e30b52fb2b46b is 50, key is test_row_0/A:col10/1732554540384/Put/seqid=0 2024-11-25T17:09:01,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-25T17:09:01,306 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-25T17:09:01,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-25T17:09:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-25T17:09:01,310 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:01,310 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:01,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:01,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742075_1251 (size=12207) 2024-11-25T17:09:01,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742076_1252 (size=12207) 2024-11-25T17:09:01,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-25T17:09:01,462 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:01,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-25T17:09:01,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
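The pid=71/pid=73 sequence above is the round trip behind an explicit table flush: the client asks the master to flush TestAcidGuarantees, the master runs a FlushTableProcedure with one FlushRegionProcedure per region, and the client's HBaseAdmin future completes once the procedure finishes. A minimal sketch of issuing that request from the Java client, assuming a reachable cluster and the same table name as in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits a flush for every region of the table and waits for the
            // master-side procedure (the FlushTableProcedure seen in the log) to finish.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}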
2024-11-25T17:09:01,464 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-25T17:09:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:01,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:01,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3ab88ff231184a498f4b1c75c39c9bf5 is 50, key is test_row_0/A:col10/1732554540414/Put/seqid=0 2024-11-25T17:09:01,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742077_1253 (size=12001) 2024-11-25T17:09:01,502 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3ab88ff231184a498f4b1c75c39c9bf5 2024-11-25T17:09:01,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6438fec3435c44efbdbb17b22d740c56 is 50, key is test_row_0/B:col10/1732554540414/Put/seqid=0 2024-11-25T17:09:01,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742078_1254 (size=12001) 2024-11-25T17:09:01,541 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6438fec3435c44efbdbb17b22d740c56 2024-11-25T17:09:01,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/b88c02def92f43a1852f5eb1258aeb75 is 50, key is test_row_0/C:col10/1732554540414/Put/seqid=0 2024-11-25T17:09:01,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:01,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742079_1255 (size=12001) 2024-11-25T17:09:01,564 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/b88c02def92f43a1852f5eb1258aeb75 2024-11-25T17:09:01,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3ab88ff231184a498f4b1c75c39c9bf5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab88ff231184a498f4b1c75c39c9bf5 2024-11-25T17:09:01,574 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab88ff231184a498f4b1c75c39c9bf5, entries=150, sequenceid=115, filesize=11.7 K 2024-11-25T17:09:01,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6438fec3435c44efbdbb17b22d740c56 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6438fec3435c44efbdbb17b22d740c56 2024-11-25T17:09:01,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554601575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,580 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6438fec3435c44efbdbb17b22d740c56, entries=150, sequenceid=115, filesize=11.7 K 2024-11-25T17:09:01,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554601577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554601577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/b88c02def92f43a1852f5eb1258aeb75 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b88c02def92f43a1852f5eb1258aeb75 2024-11-25T17:09:01,592 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b88c02def92f43a1852f5eb1258aeb75, entries=150, sequenceid=115, filesize=11.7 K 2024-11-25T17:09:01,593 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 26bdcc7959673ac8abf209b84227d813 in 129ms, sequenceid=115, compaction requested=true 2024-11-25T17:09:01,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:01,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:01,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-25T17:09:01,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-25T17:09:01,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-25T17:09:01,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 285 msec 2024-11-25T17:09:01,601 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 289 msec 2024-11-25T17:09:01,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-25T17:09:01,613 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-25T17:09:01,615 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:01,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-25T17:09:01,621 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:01,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-25T17:09:01,629 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:01,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:01,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:01,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-25T17:09:01,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:01,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:01,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:01,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-25T17:09:01,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:01,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:01,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/5386ab1681834bc6b1ce3a306de18e18 is 50, key is test_row_0/A:col10/1732554541576/Put/seqid=0 2024-11-25T17:09:01,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554601712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554601714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554601715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-25T17:09:01,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742080_1256 (size=12101) 2024-11-25T17:09:01,740 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/8d7abe798d6745d1acc12db6cc0de79f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8d7abe798d6745d1acc12db6cc0de79f 2024-11-25T17:09:01,750 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into 8d7abe798d6745d1acc12db6cc0de79f(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:01,750 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:01,750 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=13, startTime=1732554541267; duration=0sec 2024-11-25T17:09:01,750 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:01,750 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:01,750 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:01,752 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:01,752 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:01,752 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:01,752 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c8332d5bd00049a68f8161dcac5d752e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/ecf8c24509c342abb3c4ccef6408c97e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/66c8eb88b63842adb7c371ab20722732, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b88c02def92f43a1852f5eb1258aeb75] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=47.0 K 2024-11-25T17:09:01,753 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c8332d5bd00049a68f8161dcac5d752e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732554537945 2024-11-25T17:09:01,753 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ecf8c24509c342abb3c4ccef6408c97e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554538597 2024-11-25T17:09:01,754 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 66c8eb88b63842adb7c371ab20722732, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=94, earliestPutTs=1732554539751 2024-11-25T17:09:01,754 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b88c02def92f43a1852f5eb1258aeb75, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554540407 2024-11-25T17:09:01,760 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/79c5c54f646545118f6e30b52fb2b46b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/79c5c54f646545118f6e30b52fb2b46b 2024-11-25T17:09:01,767 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 79c5c54f646545118f6e30b52fb2b46b(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:01,767 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:01,767 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=13, startTime=1732554541267; duration=0sec 2024-11-25T17:09:01,767 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:01,767 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:01,780 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#212 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:01,781 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/d1aec28f3bb64ba895cfbd4f9dac09f2 is 50, key is test_row_0/C:col10/1732554540414/Put/seqid=0 2024-11-25T17:09:01,794 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:01,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-25T17:09:01,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:01,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:01,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:01,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:01,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:01,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:01,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554601816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554601820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554601820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742081_1257 (size=12241) 2024-11-25T17:09:01,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-25T17:09:01,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554601934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:01,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554601935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:01,948 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:01,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-25T17:09:01,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:01,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:01,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:01,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:01,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:02,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554602021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554602022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554602025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,101 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:02,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-25T17:09:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:02,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:02,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:02,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:02,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/5386ab1681834bc6b1ce3a306de18e18 2024-11-25T17:09:02,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/68a2f54e53264b0cb3277eb48189e4d2 is 50, key is test_row_0/B:col10/1732554541576/Put/seqid=0 2024-11-25T17:09:02,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742082_1258 (size=12101) 2024-11-25T17:09:02,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/68a2f54e53264b0cb3277eb48189e4d2 2024-11-25T17:09:02,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3bcab74e0ad4453e8a6f9ef10bfab143 is 50, key is test_row_0/C:col10/1732554541576/Put/seqid=0 2024-11-25T17:09:02,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742083_1259 (size=12101) 2024-11-25T17:09:02,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3bcab74e0ad4453e8a6f9ef10bfab143 2024-11-25T17:09:02,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/5386ab1681834bc6b1ce3a306de18e18 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/5386ab1681834bc6b1ce3a306de18e18 2024-11-25T17:09:02,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/5386ab1681834bc6b1ce3a306de18e18, entries=150, sequenceid=132, filesize=11.8 K 2024-11-25T17:09:02,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/68a2f54e53264b0cb3277eb48189e4d2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/68a2f54e53264b0cb3277eb48189e4d2 2024-11-25T17:09:02,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/68a2f54e53264b0cb3277eb48189e4d2, entries=150, sequenceid=132, filesize=11.8 K 2024-11-25T17:09:02,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3bcab74e0ad4453e8a6f9ef10bfab143 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3bcab74e0ad4453e8a6f9ef10bfab143 2024-11-25T17:09:02,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3bcab74e0ad4453e8a6f9ef10bfab143, entries=150, sequenceid=132, filesize=11.8 K 2024-11-25T17:09:02,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 26bdcc7959673ac8abf209b84227d813 in 522ms, sequenceid=132, compaction requested=true 2024-11-25T17:09:02,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:02,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:02,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:02,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:02,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:02,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:02,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 
2024-11-25T17:09:02,206 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:02,207 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:02,207 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:02,207 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:02,207 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/79c5c54f646545118f6e30b52fb2b46b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab88ff231184a498f4b1c75c39c9bf5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/5386ab1681834bc6b1ce3a306de18e18] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=35.5 K 2024-11-25T17:09:02,208 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79c5c54f646545118f6e30b52fb2b46b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732554539751 2024-11-25T17:09:02,208 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ab88ff231184a498f4b1c75c39c9bf5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554540407 2024-11-25T17:09:02,209 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5386ab1681834bc6b1ce3a306de18e18, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554541571 2024-11-25T17:09:02,219 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#215 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:02,220 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/13587035cdb44b8b82ab64e2d1a3df71 is 50, key is test_row_0/A:col10/1732554541576/Put/seqid=0 2024-11-25T17:09:02,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-25T17:09:02,232 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/d1aec28f3bb64ba895cfbd4f9dac09f2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d1aec28f3bb64ba895cfbd4f9dac09f2 2024-11-25T17:09:02,237 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into d1aec28f3bb64ba895cfbd4f9dac09f2(size=12.0 K), total size for store is 23.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:02,237 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:02,237 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=12, startTime=1732554541267; duration=0sec 2024-11-25T17:09:02,237 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:09:02,237 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:02,237 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:02,237 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:02,239 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:02,239 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:02,239 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:02,239 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8d7abe798d6745d1acc12db6cc0de79f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6438fec3435c44efbdbb17b22d740c56, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/68a2f54e53264b0cb3277eb48189e4d2] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=35.5 K 2024-11-25T17:09:02,240 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d7abe798d6745d1acc12db6cc0de79f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732554539751 2024-11-25T17:09:02,241 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6438fec3435c44efbdbb17b22d740c56, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554540407 2024-11-25T17:09:02,241 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 68a2f54e53264b0cb3277eb48189e4d2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554541571 2024-11-25T17:09:02,257 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:02,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-25T17:09:02,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:02,258 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-25T17:09:02,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:02,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:02,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:02,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:02,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:02,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:02,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742084_1260 (size=12409) 2024-11-25T17:09:02,270 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#216 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:02,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/6c00f41f10694df3a72ddfddf0b86ed7 is 50, key is test_row_0/A:col10/1732554541712/Put/seqid=0 2024-11-25T17:09:02,284 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/eadce6ea6bc54e569040d61e114720bf is 50, key is test_row_0/B:col10/1732554541576/Put/seqid=0 2024-11-25T17:09:02,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742086_1262 (size=12151) 2024-11-25T17:09:02,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742085_1261 (size=12409) 2024-11-25T17:09:02,323 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/eadce6ea6bc54e569040d61e114720bf as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/eadce6ea6bc54e569040d61e114720bf 2024-11-25T17:09:02,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:02,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:02,328 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into eadce6ea6bc54e569040d61e114720bf(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:02,328 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:02,328 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=13, startTime=1732554542206; duration=0sec 2024-11-25T17:09:02,328 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:02,328 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:02,329 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-25T17:09:02,330 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-25T17:09:02,330 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-25T17:09:02,330 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. because compaction request was cancelled 2024-11-25T17:09:02,330 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:02,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554602345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554602346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554602348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554602451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554602452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554602454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554602653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554602655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554602657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,673 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/13587035cdb44b8b82ab64e2d1a3df71 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/13587035cdb44b8b82ab64e2d1a3df71 2024-11-25T17:09:02,679 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 13587035cdb44b8b82ab64e2d1a3df71(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:02,679 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:02,679 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=13, startTime=1732554542206; duration=0sec 2024-11-25T17:09:02,679 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:02,679 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:02,717 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/6c00f41f10694df3a72ddfddf0b86ed7 2024-11-25T17:09:02,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/1d213c70416342dea03efcacb6dae485 is 50, key is test_row_0/B:col10/1732554541712/Put/seqid=0 2024-11-25T17:09:02,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-25T17:09:02,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742087_1263 (size=12151) 2024-11-25T17:09:02,751 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/1d213c70416342dea03efcacb6dae485 2024-11-25T17:09:02,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/877e2a4f130447579b6b101166d953a9 is 50, key is test_row_0/C:col10/1732554541712/Put/seqid=0 2024-11-25T17:09:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742088_1264 (size=12151) 2024-11-25T17:09:02,786 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/877e2a4f130447579b6b101166d953a9 2024-11-25T17:09:02,792 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/6c00f41f10694df3a72ddfddf0b86ed7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6c00f41f10694df3a72ddfddf0b86ed7 2024-11-25T17:09:02,796 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6c00f41f10694df3a72ddfddf0b86ed7, entries=150, sequenceid=154, filesize=11.9 K 2024-11-25T17:09:02,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/1d213c70416342dea03efcacb6dae485 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1d213c70416342dea03efcacb6dae485 2024-11-25T17:09:02,802 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1d213c70416342dea03efcacb6dae485, entries=150, sequenceid=154, filesize=11.9 K 2024-11-25T17:09:02,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/877e2a4f130447579b6b101166d953a9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/877e2a4f130447579b6b101166d953a9 2024-11-25T17:09:02,807 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/877e2a4f130447579b6b101166d953a9, entries=150, sequenceid=154, filesize=11.9 K 2024-11-25T17:09:02,808 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 26bdcc7959673ac8abf209b84227d813 in 550ms, sequenceid=154, compaction requested=true 2024-11-25T17:09:02,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:02,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:02,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-25T17:09:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-25T17:09:02,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-25T17:09:02,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1800 sec 2024-11-25T17:09:02,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.1970 sec 2024-11-25T17:09:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:02,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-25T17:09:02,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:02,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:02,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:02,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:02,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:02,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:02,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/2fb6eaa0c6714c10865e525e4f0495fe is 50, key is test_row_0/A:col10/1732554542332/Put/seqid=0 2024-11-25T17:09:02,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742089_1265 (size=14541) 2024-11-25T17:09:02,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/2fb6eaa0c6714c10865e525e4f0495fe 2024-11-25T17:09:02,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554602979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554602982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:02,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554602984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:02,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/386b6cf7f70c405290d4c40ecc8c7519 is 50, key is test_row_0/B:col10/1732554542332/Put/seqid=0 2024-11-25T17:09:03,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742090_1266 (size=12151) 2024-11-25T17:09:03,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554603088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554603090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554603090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554603290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554603294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554603295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/386b6cf7f70c405290d4c40ecc8c7519 2024-11-25T17:09:03,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3a8f379f773143dca8b201f7303b7ae2 is 50, key is test_row_0/C:col10/1732554542332/Put/seqid=0 2024-11-25T17:09:03,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742091_1267 (size=12151) 2024-11-25T17:09:03,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554603594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554603598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554603598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-25T17:09:03,727 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-25T17:09:03,729 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:03,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-25T17:09:03,730 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:03,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-25T17:09:03,733 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:03,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:03,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-25T17:09:03,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3a8f379f773143dca8b201f7303b7ae2 2024-11-25T17:09:03,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/2fb6eaa0c6714c10865e525e4f0495fe as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/2fb6eaa0c6714c10865e525e4f0495fe 2024-11-25T17:09:03,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/2fb6eaa0c6714c10865e525e4f0495fe, entries=200, sequenceid=174, filesize=14.2 K 2024-11-25T17:09:03,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/386b6cf7f70c405290d4c40ecc8c7519 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/386b6cf7f70c405290d4c40ecc8c7519 2024-11-25T17:09:03,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/386b6cf7f70c405290d4c40ecc8c7519, entries=150, sequenceid=174, filesize=11.9 K 2024-11-25T17:09:03,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3a8f379f773143dca8b201f7303b7ae2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3a8f379f773143dca8b201f7303b7ae2 2024-11-25T17:09:03,885 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:03,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3a8f379f773143dca8b201f7303b7ae2, entries=150, sequenceid=174, filesize=11.9 K 2024-11-25T17:09:03,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:03,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:03,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
as already flushing 2024-11-25T17:09:03,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:03,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:03,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:03,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:03,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 26bdcc7959673ac8abf209b84227d813 in 927ms, sequenceid=174, compaction requested=true 2024-11-25T17:09:03,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:03,887 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:03,889 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39101 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:03,889 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:03,889 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:03,889 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/13587035cdb44b8b82ab64e2d1a3df71, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6c00f41f10694df3a72ddfddf0b86ed7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/2fb6eaa0c6714c10865e525e4f0495fe] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=38.2 K 2024-11-25T17:09:03,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:03,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:03,889 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:03,889 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13587035cdb44b8b82ab64e2d1a3df71, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554541571 2024-11-25T17:09:03,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:03,890 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c00f41f10694df3a72ddfddf0b86ed7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732554541710 2024-11-25T17:09:03,891 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:03,891 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:03,891 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:03,891 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/eadce6ea6bc54e569040d61e114720bf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1d213c70416342dea03efcacb6dae485, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/386b6cf7f70c405290d4c40ecc8c7519] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=35.9 K 2024-11-25T17:09:03,891 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fb6eaa0c6714c10865e525e4f0495fe, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732554542332 2024-11-25T17:09:03,891 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting eadce6ea6bc54e569040d61e114720bf, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554541571 2024-11-25T17:09:03,892 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d213c70416342dea03efcacb6dae485, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732554541710 2024-11-25T17:09:03,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:03,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:03,892 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 386b6cf7f70c405290d4c40ecc8c7519, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732554542332 2024-11-25T17:09:03,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:03,912 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#223 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:03,912 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/1c6ff3f8a8784c86b931e31fad0bbd5a is 50, key is test_row_0/A:col10/1732554542332/Put/seqid=0 2024-11-25T17:09:03,915 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#224 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:03,916 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/07d0200ffd914364ae62afab7ab6f7c9 is 50, key is test_row_0/B:col10/1732554542332/Put/seqid=0 2024-11-25T17:09:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742092_1268 (size=12561) 2024-11-25T17:09:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742093_1269 (size=12561) 2024-11-25T17:09:03,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:03,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-25T17:09:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:03,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:03,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/e6f9aaf336b34de2a3f9f4c33bdae342 is 50, key is test_row_0/A:col10/1732554543951/Put/seqid=0 2024-11-25T17:09:03,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742094_1270 (size=12151) 2024-11-25T17:09:03,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554603985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:03,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554603986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-25T17:09:04,037 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:04,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:04,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:04,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:04,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554604088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554604089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554604098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554604102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554604103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,190 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:04,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:04,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:04,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:04,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554604291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554604292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-25T17:09:04,344 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/1c6ff3f8a8784c86b931e31fad0bbd5a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1c6ff3f8a8784c86b931e31fad0bbd5a 2024-11-25T17:09:04,347 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:04,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:04,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:04,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,351 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 1c6ff3f8a8784c86b931e31fad0bbd5a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:04,351 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:04,351 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=13, startTime=1732554543887; duration=0sec 2024-11-25T17:09:04,351 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:04,351 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:04,351 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:04,357 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:04,357 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:04,357 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,357 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d1aec28f3bb64ba895cfbd4f9dac09f2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3bcab74e0ad4453e8a6f9ef10bfab143, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/877e2a4f130447579b6b101166d953a9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3a8f379f773143dca8b201f7303b7ae2] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=47.5 K 2024-11-25T17:09:04,358 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1aec28f3bb64ba895cfbd4f9dac09f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554540407 2024-11-25T17:09:04,359 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bcab74e0ad4453e8a6f9ef10bfab143, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732554541571 2024-11-25T17:09:04,359 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 877e2a4f130447579b6b101166d953a9, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1732554541710 2024-11-25T17:09:04,359 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/07d0200ffd914364ae62afab7ab6f7c9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/07d0200ffd914364ae62afab7ab6f7c9 2024-11-25T17:09:04,360 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a8f379f773143dca8b201f7303b7ae2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732554542332 2024-11-25T17:09:04,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/e6f9aaf336b34de2a3f9f4c33bdae342 2024-11-25T17:09:04,364 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into 07d0200ffd914364ae62afab7ab6f7c9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:04,364 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:04,364 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=13, startTime=1732554543889; duration=0sec 2024-11-25T17:09:04,364 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:04,365 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:04,371 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:04,372 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/38311d35ec7b4e40af605d626b2bc105 is 50, key is test_row_0/C:col10/1732554542332/Put/seqid=0 2024-11-25T17:09:04,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/76d158b1fcb44df0ae2050a647915649 is 50, key is test_row_0/B:col10/1732554543951/Put/seqid=0 2024-11-25T17:09:04,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742096_1272 (size=12151) 2024-11-25T17:09:04,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/76d158b1fcb44df0ae2050a647915649 2024-11-25T17:09:04,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742095_1271 (size=12527) 2024-11-25T17:09:04,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c5e63a0118cb4b659f90dbbb5dc0517a is 50, key is test_row_0/C:col10/1732554543951/Put/seqid=0 2024-11-25T17:09:04,472 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/38311d35ec7b4e40af605d626b2bc105 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/38311d35ec7b4e40af605d626b2bc105 2024-11-25T17:09:04,483 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into 38311d35ec7b4e40af605d626b2bc105(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:04,483 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:04,483 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=12, startTime=1732554543892; duration=0sec 2024-11-25T17:09:04,483 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:04,483 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:04,513 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:04,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:04,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:04,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:04,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742097_1273 (size=12151) 2024-11-25T17:09:04,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554604594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:04,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554604597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:04,667 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:04,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,667 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:04,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:04,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:04,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:04,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-25T17:09:04,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c5e63a0118cb4b659f90dbbb5dc0517a 2024-11-25T17:09:04,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/e6f9aaf336b34de2a3f9f4c33bdae342 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/e6f9aaf336b34de2a3f9f4c33bdae342 2024-11-25T17:09:04,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/e6f9aaf336b34de2a3f9f4c33bdae342, entries=150, sequenceid=194, filesize=11.9 K 2024-11-25T17:09:04,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/76d158b1fcb44df0ae2050a647915649 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/76d158b1fcb44df0ae2050a647915649 2024-11-25T17:09:04,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/76d158b1fcb44df0ae2050a647915649, entries=150, sequenceid=194, filesize=11.9 K 2024-11-25T17:09:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c5e63a0118cb4b659f90dbbb5dc0517a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c5e63a0118cb4b659f90dbbb5dc0517a 2024-11-25T17:09:04,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c5e63a0118cb4b659f90dbbb5dc0517a, entries=150, sequenceid=194, filesize=11.9 K 2024-11-25T17:09:04,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 26bdcc7959673ac8abf209b84227d813 in 986ms, sequenceid=194, compaction requested=false 2024-11-25T17:09:04,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:04,987 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:04,987 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-25T17:09:04,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:04,987 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:09:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:04,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/676d31c89c164463b4edd3677b1f576c is 50, key is test_row_0/A:col10/1732554543981/Put/seqid=0 2024-11-25T17:09:05,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742098_1274 (size=12151) 2024-11-25T17:09:05,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:05,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:05,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554605119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554605119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554605120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554605122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554605123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554605224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554605224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554605225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554605228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554605228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554605429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554605429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554605429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554605430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554605431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,433 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/676d31c89c164463b4edd3677b1f576c 2024-11-25T17:09:05,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/b09a8d2702c34939a53e2efcb1c23455 is 50, key is test_row_0/B:col10/1732554543981/Put/seqid=0 2024-11-25T17:09:05,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742099_1275 (size=12151) 2024-11-25T17:09:05,490 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/b09a8d2702c34939a53e2efcb1c23455 2024-11-25T17:09:05,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/20e96d8407e54ef9bda55ff652e8e99f is 50, key is test_row_0/C:col10/1732554543981/Put/seqid=0 2024-11-25T17:09:05,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742100_1276 (size=12151) 2024-11-25T17:09:05,568 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/20e96d8407e54ef9bda55ff652e8e99f 2024-11-25T17:09:05,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/676d31c89c164463b4edd3677b1f576c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/676d31c89c164463b4edd3677b1f576c 2024-11-25T17:09:05,599 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/676d31c89c164463b4edd3677b1f576c, entries=150, sequenceid=213, filesize=11.9 K 2024-11-25T17:09:05,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/b09a8d2702c34939a53e2efcb1c23455 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/b09a8d2702c34939a53e2efcb1c23455 2024-11-25T17:09:05,606 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/b09a8d2702c34939a53e2efcb1c23455, entries=150, sequenceid=213, filesize=11.9 K 2024-11-25T17:09:05,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/20e96d8407e54ef9bda55ff652e8e99f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/20e96d8407e54ef9bda55ff652e8e99f 2024-11-25T17:09:05,612 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/20e96d8407e54ef9bda55ff652e8e99f, entries=150, sequenceid=213, filesize=11.9 K 2024-11-25T17:09:05,621 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 26bdcc7959673ac8abf209b84227d813 in 634ms, sequenceid=213, compaction requested=true 2024-11-25T17:09:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
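Between the two flushes above, the RPC handlers repeatedly reject Mutate calls with RegionTooBusyException because HRegion.checkResources finds the memstore above its 512.0 K blocking limit while the flush is still draining it. The stock HBase client retries these calls internally, so the test's writers eventually get through; the sketch below is only a hedged illustration of how an explicit writer could back off instead. The table name, row key, column families and qualifier are copied from the log entries above; the retry count, sleep times and cell value are invented:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical writer that backs off while the region is over its blocking memstore size. */
public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, families and qualifier mirror the cells visible in the flush log above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int attempts = 0;
      long backoffMs = 100;
      while (true) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          // In this run the underlying cause is RegionTooBusyException: the region is
          // above its 512.0 K blocking memstore size while a flush is still in flight.
          if (++attempts >= 10) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000); // capped exponential backoff
        }
      }
    }
  }
}
```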
2024-11-25T17:09:05,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-25T17:09:05,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-25T17:09:05,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-25T17:09:05,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9070 sec 2024-11-25T17:09:05,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.9270 sec 2024-11-25T17:09:05,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:05,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:09:05,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:05,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:05,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:05,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:05,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:05,739 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:05,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/9f601d62fa9d4875a59ab65c68772f26 is 50, key is test_row_0/A:col10/1732554545737/Put/seqid=0 2024-11-25T17:09:05,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742101_1277 (size=14541) 2024-11-25T17:09:05,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554605753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554605757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554605758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554605758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554605758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-25T17:09:05,839 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-25T17:09:05,841 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-25T17:09:05,843 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-25T17:09:05,843 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:05,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:05,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554605860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554605863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554605865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554605865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554605867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:05,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-25T17:09:05,995 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:05,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-25T17:09:05,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:05,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:05,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:05,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:05,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:05,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554606063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554606066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554606068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554606068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554606072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-25T17:09:06,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/9f601d62fa9d4875a59ab65c68772f26 2024-11-25T17:09:06,155 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:06,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-25T17:09:06,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:06,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/62308cc69af447fd817241f2332bd2d4 is 50, key is test_row_0/B:col10/1732554545737/Put/seqid=0 2024-11-25T17:09:06,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742102_1278 (size=12151) 2024-11-25T17:09:06,310 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:06,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-25T17:09:06,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:06,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
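The pid=80 failures above are the server side of an externally requested flush colliding with the flush that is already running: the master keeps re-dispatching FlushRegionCallable until the in-flight flush completes. A flush of this kind is normally requested through the public Admin API; the sketch below is illustrative only, assuming the standard client classes and taking the table name from the log, with configuration and cluster location left as placeholders.

```java
// Sketch of requesting a table flush through the public client API; this is the kind
// of request that ends up dispatched to the region server as a FlushRegionCallable
// (pid=80 above). Table name is from the log; configuration and semantics are assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. If a region is already
      // flushing, the server-side callable fails and the master re-dispatches it,
      // which is the retry pattern visible in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```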
2024-11-25T17:09:06,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554606371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554606372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554606373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554606373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554606381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-25T17:09:06,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:06,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-25T17:09:06,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:06,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
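The repeated RegionTooBusyException rejections ("Over memstore limit=512.0 K") come from HRegion.checkResources, which blocks updates once a region's memstore exceeds its flush size times the blocking multiplier. A 512 K ceiling indicates a deliberately small flush size for this test; the exact values are not shown in this excerpt, so the sketch below uses assumed numbers that would produce the same limit.

```java
// Assumed configuration that would yield the 512.0 K blocking limit reported above:
// HRegion blocks writes once the memstore exceeds roughly
// hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
// The actual values used by this test run are not shown in the excerpt.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed: 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 128 K * 4 = 512 K
    // This Configuration would then be handed to the mini-cluster or Connection
    // used by the test harness.
  }
}
```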
2024-11-25T17:09:06,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/62308cc69af447fd817241f2332bd2d4 2024-11-25T17:09:06,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c3da0afd5bd4479ebff79ab6e8eed65e is 50, key is test_row_0/C:col10/1732554545737/Put/seqid=0 2024-11-25T17:09:06,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:06,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-25T17:09:06,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:06,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:06,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:06,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:06,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742103_1279 (size=12151) 2024-11-25T17:09:06,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c3da0afd5bd4479ebff79ab6e8eed65e 2024-11-25T17:09:06,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/9f601d62fa9d4875a59ab65c68772f26 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/9f601d62fa9d4875a59ab65c68772f26 2024-11-25T17:09:06,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/9f601d62fa9d4875a59ab65c68772f26, entries=200, sequenceid=236, filesize=14.2 K 2024-11-25T17:09:06,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/62308cc69af447fd817241f2332bd2d4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/62308cc69af447fd817241f2332bd2d4 2024-11-25T17:09:06,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/62308cc69af447fd817241f2332bd2d4, entries=150, sequenceid=236, filesize=11.9 K 2024-11-25T17:09:06,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/c3da0afd5bd4479ebff79ab6e8eed65e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c3da0afd5bd4479ebff79ab6e8eed65e 2024-11-25T17:09:06,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c3da0afd5bd4479ebff79ab6e8eed65e, entries=150, sequenceid=236, filesize=11.9 K 2024-11-25T17:09:06,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 26bdcc7959673ac8abf209b84227d813 in 961ms, sequenceid=236, compaction requested=true 2024-11-25T17:09:06,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:06,699 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:06,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:06,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:06,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:06,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:06,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:06,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:06,699 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:06,701 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:06,701 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:06,701 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:06,701 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1c6ff3f8a8784c86b931e31fad0bbd5a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/e6f9aaf336b34de2a3f9f4c33bdae342, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/676d31c89c164463b4edd3677b1f576c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/9f601d62fa9d4875a59ab65c68772f26] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=50.2 K 2024-11-25T17:09:06,702 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c6ff3f8a8784c86b931e31fad0bbd5a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732554542332 2024-11-25T17:09:06,702 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:06,702 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:06,702 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:06,702 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/07d0200ffd914364ae62afab7ab6f7c9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/76d158b1fcb44df0ae2050a647915649, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/b09a8d2702c34939a53e2efcb1c23455, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/62308cc69af447fd817241f2332bd2d4] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=47.9 K 2024-11-25T17:09:06,702 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6f9aaf336b34de2a3f9f4c33bdae342, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732554542977 2024-11-25T17:09:06,702 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 07d0200ffd914364ae62afab7ab6f7c9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732554542332 2024-11-25T17:09:06,702 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 676d31c89c164463b4edd3677b1f576c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732554543970 2024-11-25T17:09:06,703 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f601d62fa9d4875a59ab65c68772f26, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554545121 2024-11-25T17:09:06,703 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 76d158b1fcb44df0ae2050a647915649, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732554542977 2024-11-25T17:09:06,703 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b09a8d2702c34939a53e2efcb1c23455, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732554543970 2024-11-25T17:09:06,703 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 62308cc69af447fd817241f2332bd2d4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554545121 2024-11-25T17:09:06,720 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:06,721 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/0a2b8003d9654dadb44c8702024859cb is 50, key is test_row_0/A:col10/1732554545737/Put/seqid=0 2024-11-25T17:09:06,728 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#236 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:06,728 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/7a512df6441843709dbd71d5f7102dfa is 50, key is test_row_0/B:col10/1732554545737/Put/seqid=0 2024-11-25T17:09:06,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742104_1280 (size=12697) 2024-11-25T17:09:06,771 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:06,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-25T17:09:06,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:06,772 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:09:06,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:06,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:06,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:06,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:06,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:06,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:06,776 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/0a2b8003d9654dadb44c8702024859cb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0a2b8003d9654dadb44c8702024859cb 2024-11-25T17:09:06,782 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 0a2b8003d9654dadb44c8702024859cb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:06,782 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:06,782 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=12, startTime=1732554546698; duration=0sec 2024-11-25T17:09:06,782 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:06,782 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:06,783 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:06,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/0bdf1c9a59f54f6aba9419af935590e0 is 50, key is test_row_0/A:col10/1732554545757/Put/seqid=0 2024-11-25T17:09:06,785 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:06,785 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:06,785 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:06,785 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/38311d35ec7b4e40af605d626b2bc105, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c5e63a0118cb4b659f90dbbb5dc0517a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/20e96d8407e54ef9bda55ff652e8e99f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c3da0afd5bd4479ebff79ab6e8eed65e] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=47.8 K 2024-11-25T17:09:06,786 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38311d35ec7b4e40af605d626b2bc105, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732554542332 2024-11-25T17:09:06,786 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5e63a0118cb4b659f90dbbb5dc0517a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732554542977 2024-11-25T17:09:06,786 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20e96d8407e54ef9bda55ff652e8e99f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732554543970 2024-11-25T17:09:06,788 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3da0afd5bd4479ebff79ab6e8eed65e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554545121 2024-11-25T17:09:06,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742105_1281 (size=12697) 2024-11-25T17:09:06,808 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/7a512df6441843709dbd71d5f7102dfa as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/7a512df6441843709dbd71d5f7102dfa 2024-11-25T17:09:06,814 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into 7a512df6441843709dbd71d5f7102dfa(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:06,814 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:06,814 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=12, startTime=1732554546699; duration=0sec 2024-11-25T17:09:06,814 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:06,814 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:06,824 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#238 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:06,825 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/b9c167fd5f574b039bc6c883d2981ad0 is 50, key is test_row_0/C:col10/1732554545737/Put/seqid=0 2024-11-25T17:09:06,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742106_1282 (size=12151) 2024-11-25T17:09:06,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:06,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:06,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742107_1283 (size=12663) 2024-11-25T17:09:06,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554606934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554606936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554606937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554606938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:06,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554606938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:06,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-25T17:09:07,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554607042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554607043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554607043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554607043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554607046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554607244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554607245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554607246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554607247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554607249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,280 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/0bdf1c9a59f54f6aba9419af935590e0 2024-11-25T17:09:07,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/08883ad13f1340bab9c01dcaa8516971 is 50, key is test_row_0/B:col10/1732554545757/Put/seqid=0 2024-11-25T17:09:07,305 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/b9c167fd5f574b039bc6c883d2981ad0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b9c167fd5f574b039bc6c883d2981ad0 2024-11-25T17:09:07,318 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into b9c167fd5f574b039bc6c883d2981ad0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:07,318 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:07,318 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=12, startTime=1732554546699; duration=0sec 2024-11-25T17:09:07,318 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:07,318 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:07,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742108_1284 (size=12151) 2024-11-25T17:09:07,319 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/08883ad13f1340bab9c01dcaa8516971 2024-11-25T17:09:07,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/75e3d210f6a84fcf935d50cc6e2740e9 is 50, key is test_row_0/C:col10/1732554545757/Put/seqid=0 2024-11-25T17:09:07,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742109_1285 (size=12151) 2024-11-25T17:09:07,365 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/75e3d210f6a84fcf935d50cc6e2740e9 2024-11-25T17:09:07,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/0bdf1c9a59f54f6aba9419af935590e0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0bdf1c9a59f54f6aba9419af935590e0 2024-11-25T17:09:07,375 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0bdf1c9a59f54f6aba9419af935590e0, entries=150, sequenceid=250, filesize=11.9 K 2024-11-25T17:09:07,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 
{event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/08883ad13f1340bab9c01dcaa8516971 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/08883ad13f1340bab9c01dcaa8516971 2024-11-25T17:09:07,382 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/08883ad13f1340bab9c01dcaa8516971, entries=150, sequenceid=250, filesize=11.9 K 2024-11-25T17:09:07,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/75e3d210f6a84fcf935d50cc6e2740e9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/75e3d210f6a84fcf935d50cc6e2740e9 2024-11-25T17:09:07,389 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/75e3d210f6a84fcf935d50cc6e2740e9, entries=150, sequenceid=250, filesize=11.9 K 2024-11-25T17:09:07,394 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 26bdcc7959673ac8abf209b84227d813 in 622ms, sequenceid=250, compaction requested=false 2024-11-25T17:09:07,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:07,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:07,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-25T17:09:07,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-25T17:09:07,400 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-25T17:09:07,400 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5550 sec 2024-11-25T17:09:07,402 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.5600 sec 2024-11-25T17:09:07,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:07,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:09:07,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:07,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:07,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:07,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:07,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:07,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:07,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/46028e9c790c416ebe41b856cf75030f is 50, key is test_row_0/A:col10/1732554547552/Put/seqid=0 2024-11-25T17:09:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742110_1286 (size=12301) 2024-11-25T17:09:07,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554607575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554607575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554607575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554607576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554607577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554607686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554607686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554607686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554607686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554607686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554607902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554607902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554607902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554607902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:07,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554607902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:07,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-25T17:09:07,951 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-25T17:09:07,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:07,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-25T17:09:07,956 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:07,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-25T17:09:07,957 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:07,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:07,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=278 
(bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/46028e9c790c416ebe41b856cf75030f 2024-11-25T17:09:08,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6471f0f011fc4b5e92de5f6118c44ade is 50, key is test_row_0/B:col10/1732554547552/Put/seqid=0 2024-11-25T17:09:08,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742111_1287 (size=12301) 2024-11-25T17:09:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-25T17:09:08,110 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:08,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-25T17:09:08,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:08,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:08,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:08,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
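Note: in the chunk above the master stores a new FlushTableProcedure (pid=81) and dispatches a per-region FlushRegionCallable (pid=82) while MemStoreFlusher.0 is still writing out the previous snapshot, so the region answers "NOT flushing ... as already flushing" and the callable fails with "Unable to complete flush"; the master keeps re-dispatching pid=82 until the in-flight flush completes. A minimal sketch, assuming an HBase 2.x Admin client, of the table-level flush request that starts such a procedure; only the table name is taken from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits a flush for every region of the table and waits for the
                // resulting procedure to finish on the master.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The call waits on the procedure, which is why the client thread in the log (Thread-1087) only reports "Operation: FLUSH ... procId: 79 completed" after the earlier procedure has finished.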
2024-11-25T17:09:08,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:08,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:08,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554608206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554608206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554608207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554608207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554608208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-25T17:09:08,269 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:08,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-25T17:09:08,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:08,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:08,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:08,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
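Note: the repeated RegionTooBusyException records above mean the region is rejecting new Mutate calls because its MemStore has grown past the 512.0 K blocking limit; the server returns the exception to each caller, and callers are expected to back off until the flush frees space. A minimal sketch, assuming the caller handles the retry itself rather than relying on the client's built-in retry policy, of a backoff loop around Table.put; the attempt count and sleep times are assumptions, not values from this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BackoffPut {
        /** Retries a put a few times when the region reports it is over its memstore limit. */
        static void putWithBackoff(Table table, Put put, int maxAttempts)
                throws IOException, InterruptedException {
            long sleepMs = 100;                              // assumed starting backoff
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;                                  // accepted once the flush has caught up
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e;                             // give up after maxAttempts rejections
                    }
                    Thread.sleep(sleepMs);                   // wait for MemStoreFlusher to free space
                    sleepMs = Math.min(sleepMs * 2, 5_000);  // exponential backoff, capped
                }
            }
        }
    }

Once MemStoreFlusher.0 finishes (see the "Finished flush" record further down), the same puts are accepted again.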
2024-11-25T17:09:08,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:08,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:08,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-25T17:09:08,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:08,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:08,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:08,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:08,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
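Note: the 512.0 K figure quoted in those RegionTooBusyException messages is the region's blocking memstore size, which HBase computes as the configured memstore flush size times hbase.hregion.memstore.block.multiplier; the test is evidently running with a far smaller flush size than the production default of 128 MB. A minimal sketch with assumed values of how such a limit would be configured; only the property names are standard, the numbers are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed values for illustration: updates block once a region's memstore
            // exceeds flush.size * block.multiplier (here 128 K * 4 = 512 K, the same
            // figure as the "Over memstore limit=512.0 K" messages above).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
        }
    }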
2024-11-25T17:09:08,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6471f0f011fc4b5e92de5f6118c44ade 2024-11-25T17:09:08,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/13d6883709504e0b84fdfbafdc165ac2 is 50, key is test_row_0/C:col10/1732554547552/Put/seqid=0 2024-11-25T17:09:08,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742112_1288 (size=12301) 2024-11-25T17:09:08,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/13d6883709504e0b84fdfbafdc165ac2 2024-11-25T17:09:08,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/46028e9c790c416ebe41b856cf75030f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/46028e9c790c416ebe41b856cf75030f 2024-11-25T17:09:08,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/46028e9c790c416ebe41b856cf75030f, entries=150, sequenceid=278, filesize=12.0 K 2024-11-25T17:09:08,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6471f0f011fc4b5e92de5f6118c44ade as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6471f0f011fc4b5e92de5f6118c44ade 2024-11-25T17:09:08,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6471f0f011fc4b5e92de5f6118c44ade, entries=150, sequenceid=278, filesize=12.0 K 2024-11-25T17:09:08,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/13d6883709504e0b84fdfbafdc165ac2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/13d6883709504e0b84fdfbafdc165ac2 2024-11-25T17:09:08,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/13d6883709504e0b84fdfbafdc165ac2, entries=150, sequenceid=278, filesize=12.0 K 2024-11-25T17:09:08,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 26bdcc7959673ac8abf209b84227d813 in 979ms, sequenceid=278, compaction requested=true 2024-11-25T17:09:08,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:08,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:08,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:08,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:08,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:08,534 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:08,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:08,534 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:08,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:08,535 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:08,535 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:08,535 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:08,535 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:08,535 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:08,535 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:08,535 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0a2b8003d9654dadb44c8702024859cb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0bdf1c9a59f54f6aba9419af935590e0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/46028e9c790c416ebe41b856cf75030f] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=36.3 K 2024-11-25T17:09:08,535 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/7a512df6441843709dbd71d5f7102dfa, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/08883ad13f1340bab9c01dcaa8516971, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6471f0f011fc4b5e92de5f6118c44ade] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=36.3 K 2024-11-25T17:09:08,537 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a2b8003d9654dadb44c8702024859cb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554545121 2024-11-25T17:09:08,537 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bdf1c9a59f54f6aba9419af935590e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732554545750 2024-11-25T17:09:08,537 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46028e9c790c416ebe41b856cf75030f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732554546931 2024-11-25T17:09:08,540 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a512df6441843709dbd71d5f7102dfa, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554545121 2024-11-25T17:09:08,543 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 08883ad13f1340bab9c01dcaa8516971, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732554545750 2024-11-25T17:09:08,549 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6471f0f011fc4b5e92de5f6118c44ade, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732554546931 
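The two "Exploring compaction algorithm has selected 3 files of size 37149" entries show the policy accepting all three eligible store files (~12.4 K, ~11.9 K and ~12.0 K) for a minor compaction of stores A and B. A simplified illustration of the size-ratio test such a selection has to pass, assuming the commonly cited default ratio of 1.2; this is a sketch of the idea, not HBase's actual ExploringCompactionPolicy code:

```java
import java.util.List;

public class RatioCheckSketch {
  /**
   * Illustrative only: a candidate set passes when no single file is larger than
   * `ratio` times the combined size of the other files in the set.
   */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-store files reported above (~36.3 K total):
    // similarly sized files easily satisfy a 1.2 ratio, so all three compact together.
    System.out.println(filesInRatio(List.of(12_700L, 12_150L, 12_300L), 1.2));
  }
}
```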
2024-11-25T17:09:08,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-25T17:09:08,567 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#244 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:08,567 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4965ee50babd41a8a7783635b113350a is 50, key is test_row_0/A:col10/1732554547552/Put/seqid=0 2024-11-25T17:09:08,569 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#245 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:08,570 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/1b32c2c33d4c49688d8688f8254c5085 is 50, key is test_row_0/B:col10/1732554547552/Put/seqid=0 2024-11-25T17:09:08,575 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:08,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-25T17:09:08,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:08,576 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:08,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:08,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:08,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:08,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:08,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:08,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:08,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742114_1290 (size=12949) 2024-11-25T17:09:08,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742113_1289 (size=12949) 2024-11-25T17:09:08,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/127a17e92ed94d6e8842e276579f4b18 is 50, key is test_row_0/A:col10/1732554547561/Put/seqid=0 2024-11-25T17:09:08,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742115_1291 (size=12301) 2024-11-25T17:09:08,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:08,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:08,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554608740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554608741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554608744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554608744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554608745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554608846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554608848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554608849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554608850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:08,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554608868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:08,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/1b32c2c33d4c49688d8688f8254c5085 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1b32c2c33d4c49688d8688f8254c5085 2024-11-25T17:09:08,993 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4965ee50babd41a8a7783635b113350a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4965ee50babd41a8a7783635b113350a 2024-11-25T17:09:09,003 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 4965ee50babd41a8a7783635b113350a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
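The "Committing ….tmp/… as …" lines just above show how both flush and compaction outputs are published: the new HFile is written under the region's .tmp directory and then moved into the store directory, so readers never see a partially written file. A rough sketch of that move using the generic Hadoop FileSystem API; this simplifies what HRegionFileSystem(442) actually does (which also validates the new file first), and the helper name is made up:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CommitStoreFileSketch {
  /**
   * Illustrative only: move a finished HFile from the region's .tmp area into the
   * store directory. The HDFS rename is atomic, mirroring the
   * "Committing .tmp/... as ..." entries in the log.
   */
  static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}
```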
2024-11-25T17:09:09,003 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:09,003 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=13, startTime=1732554548534; duration=0sec 2024-11-25T17:09:09,003 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:09,003 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:09,004 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:09,006 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/127a17e92ed94d6e8842e276579f4b18 2024-11-25T17:09:09,006 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:09,006 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:09,006 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:09,006 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b9c167fd5f574b039bc6c883d2981ad0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/75e3d210f6a84fcf935d50cc6e2740e9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/13d6883709504e0b84fdfbafdc165ac2] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=36.2 K 2024-11-25T17:09:09,007 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9c167fd5f574b039bc6c883d2981ad0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554545121 2024-11-25T17:09:09,007 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into 1b32c2c33d4c49688d8688f8254c5085(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:09,007 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:09,007 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=13, startTime=1732554548534; duration=0sec 2024-11-25T17:09:09,007 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:09,007 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:09,007 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75e3d210f6a84fcf935d50cc6e2740e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732554545750 2024-11-25T17:09:09,008 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13d6883709504e0b84fdfbafdc165ac2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732554546931 2024-11-25T17:09:09,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/3a08b143bb834c7faa07b99cf9d31dbf is 50, key is test_row_0/B:col10/1732554547561/Put/seqid=0 2024-11-25T17:09:09,050 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#248 average throughput is 3.28 MB/second, 
slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:09,051 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/29ed1a5835814f04839c3541c7eb84f9 is 50, key is test_row_0/C:col10/1732554547552/Put/seqid=0 2024-11-25T17:09:09,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554609049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742116_1292 (size=12301) 2024-11-25T17:09:09,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,056 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/3a08b143bb834c7faa07b99cf9d31dbf 2024-11-25T17:09:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554609054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554609054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554609054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-25T17:09:09,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554609072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742117_1293 (size=12915) 2024-11-25T17:09:09,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/4cc73c7f7a7b4b31927dd0b5328ec5c0 is 50, key is test_row_0/C:col10/1732554547561/Put/seqid=0 2024-11-25T17:09:09,087 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/29ed1a5835814f04839c3541c7eb84f9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/29ed1a5835814f04839c3541c7eb84f9 2024-11-25T17:09:09,093 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into 29ed1a5835814f04839c3541c7eb84f9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
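From 17:09:08,745 onward the handlers repeatedly reject mutations with RegionTooBusyException because this region's memstore is over its 512 K blocking limit while the flush and compactions above drain it (the test runs with a deliberately tiny limit; in a stock deployment the limit is derived from the memstore flush size and block multiplier settings). These rejections are retryable back-pressure, not data loss. A minimal sketch of a caller backing off and retrying, using only the standard client API; note that the shipped HBase client already retries this exception internally, so the sketch assumes a setup where it surfaces to the caller, and the cell value is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier taken from the log ("test_row_0", family A, col10).
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drops back under the blocking limit
        } catch (RegionTooBusyException e) {
          // Region is applying back-pressure (memstore over limit); wait and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```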
2024-11-25T17:09:09,093 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:09,093 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=13, startTime=1732554548534; duration=0sec 2024-11-25T17:09:09,093 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:09,093 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:09,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742118_1294 (size=12301) 2024-11-25T17:09:09,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554609352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554609358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554609358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554609359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554609375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,506 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/4cc73c7f7a7b4b31927dd0b5328ec5c0 2024-11-25T17:09:09,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/127a17e92ed94d6e8842e276579f4b18 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/127a17e92ed94d6e8842e276579f4b18 2024-11-25T17:09:09,534 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/127a17e92ed94d6e8842e276579f4b18, entries=150, sequenceid=289, filesize=12.0 K 2024-11-25T17:09:09,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/3a08b143bb834c7faa07b99cf9d31dbf as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3a08b143bb834c7faa07b99cf9d31dbf 2024-11-25T17:09:09,543 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3a08b143bb834c7faa07b99cf9d31dbf, entries=150, sequenceid=289, filesize=12.0 K 2024-11-25T17:09:09,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/4cc73c7f7a7b4b31927dd0b5328ec5c0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/4cc73c7f7a7b4b31927dd0b5328ec5c0 2024-11-25T17:09:09,561 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/4cc73c7f7a7b4b31927dd0b5328ec5c0, entries=150, sequenceid=289, filesize=12.0 K 2024-11-25T17:09:09,565 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 26bdcc7959673ac8abf209b84227d813 in 989ms, sequenceid=289, compaction requested=false 2024-11-25T17:09:09,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:09,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:09,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-25T17:09:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-25T17:09:09,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-25T17:09:09,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6280 sec 2024-11-25T17:09:09,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.6440 sec 2024-11-25T17:09:09,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:09:09,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:09,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:09,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:09,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:09,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:09,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-25T17:09:09,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:09,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/8e000e8e438c424f8cb4375ac013da08 is 50, key is test_row_0/A:col10/1732554549860/Put/seqid=0 2024-11-25T17:09:09,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554609870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554609872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554609874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554609874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554609882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742119_1295 (size=14741) 2024-11-25T17:09:09,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554609978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554609978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554609987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:09,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:09,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554609988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-25T17:09:10,061 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-25T17:09:10,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-25T17:09:10,070 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:10,071 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:10,071 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:10,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-25T17:09:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=83 2024-11-25T17:09:10,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554610191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554610193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554610193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554610197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,224 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:10,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-25T17:09:10,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:10,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/8e000e8e438c424f8cb4375ac013da08 2024-11-25T17:09:10,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/3239456c0e1444ffbb9ce1ff627a9f7a is 50, key is test_row_0/B:col10/1732554549860/Put/seqid=0 2024-11-25T17:09:10,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742120_1296 (size=12301) 2024-11-25T17:09:10,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/3239456c0e1444ffbb9ce1ff627a9f7a 2024-11-25T17:09:10,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-25T17:09:10,380 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:10,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-25T17:09:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:10,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/f0bbda05a30b413695f1666557c8e9d5 is 50, key is test_row_0/C:col10/1732554549860/Put/seqid=0 2024-11-25T17:09:10,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742121_1297 (size=12301) 2024-11-25T17:09:10,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/f0bbda05a30b413695f1666557c8e9d5 2024-11-25T17:09:10,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/8e000e8e438c424f8cb4375ac013da08 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8e000e8e438c424f8cb4375ac013da08 2024-11-25T17:09:10,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8e000e8e438c424f8cb4375ac013da08, entries=200, sequenceid=318, filesize=14.4 K 2024-11-25T17:09:10,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/3239456c0e1444ffbb9ce1ff627a9f7a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3239456c0e1444ffbb9ce1ff627a9f7a 2024-11-25T17:09:10,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3239456c0e1444ffbb9ce1ff627a9f7a, entries=150, sequenceid=318, filesize=12.0 K 2024-11-25T17:09:10,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/f0bbda05a30b413695f1666557c8e9d5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f0bbda05a30b413695f1666557c8e9d5 2024-11-25T17:09:10,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f0bbda05a30b413695f1666557c8e9d5, entries=150, sequenceid=318, filesize=12.0 K 2024-11-25T17:09:10,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 26bdcc7959673ac8abf209b84227d813 in 594ms, sequenceid=318, compaction requested=true 2024-11-25T17:09:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:10,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:10,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:09:10,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:10,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-25T17:09:10,457 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:10,457 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store 
files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:10,458 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:10,458 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:10,458 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,458 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4965ee50babd41a8a7783635b113350a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/127a17e92ed94d6e8842e276579f4b18, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8e000e8e438c424f8cb4375ac013da08] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=39.1 K 2024-11-25T17:09:10,458 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:10,458 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:10,458 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:10,458 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/29ed1a5835814f04839c3541c7eb84f9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/4cc73c7f7a7b4b31927dd0b5328ec5c0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f0bbda05a30b413695f1666557c8e9d5] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=36.6 K 2024-11-25T17:09:10,459 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 4965ee50babd41a8a7783635b113350a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732554546931 2024-11-25T17:09:10,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29ed1a5835814f04839c3541c7eb84f9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732554546931 2024-11-25T17:09:10,459 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 127a17e92ed94d6e8842e276579f4b18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732554547561 2024-11-25T17:09:10,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cc73c7f7a7b4b31927dd0b5328ec5c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732554547561 2024-11-25T17:09:10,459 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e000e8e438c424f8cb4375ac013da08, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554548741 2024-11-25T17:09:10,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0bbda05a30b413695f1666557c8e9d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554548741 2024-11-25T17:09:10,475 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:10,476 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/6a36279ee2cb488db2851eca21378544 is 50, key is test_row_0/A:col10/1732554549860/Put/seqid=0 2024-11-25T17:09:10,487 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#254 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:10,488 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/51ca08ad167547689d77f40b93d0f3df is 50, key is test_row_0/C:col10/1732554549860/Put/seqid=0 2024-11-25T17:09:10,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:10,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:09:10,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:10,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:10,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:10,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:10,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:10,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:10,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742122_1298 (size=13051) 2024-11-25T17:09:10,537 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:10,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-25T17:09:10,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4b3a57f9f84b400d95d26268e98d8330 is 50, key is test_row_0/A:col10/1732554550504/Put/seqid=0 2024-11-25T17:09:10,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742123_1299 (size=13017) 2024-11-25T17:09:10,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742124_1300 (size=12301) 2024-11-25T17:09:10,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4b3a57f9f84b400d95d26268e98d8330 2024-11-25T17:09:10,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554610575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554610579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554610581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554610582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/cd9abfb2353b40ac8d166fa94f5fe807 is 50, key is test_row_0/B:col10/1732554550504/Put/seqid=0 2024-11-25T17:09:10,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742125_1301 (size=12301) 2024-11-25T17:09:10,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-25T17:09:10,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554610685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554610691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554610691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554610693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,704 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:10,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-25T17:09:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:10,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-25T17:09:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:10,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554610888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554610895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554610896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554610896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554610903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:10,929 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/6a36279ee2cb488db2851eca21378544 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6a36279ee2cb488db2851eca21378544 2024-11-25T17:09:10,940 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 6a36279ee2cb488db2851eca21378544(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:10,940 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:10,940 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=13, startTime=1732554550456; duration=0sec 2024-11-25T17:09:10,941 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:10,941 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:10,941 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:10,942 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:10,942 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:10,942 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:10,942 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1b32c2c33d4c49688d8688f8254c5085, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3a08b143bb834c7faa07b99cf9d31dbf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3239456c0e1444ffbb9ce1ff627a9f7a] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=36.7 K 2024-11-25T17:09:10,942 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b32c2c33d4c49688d8688f8254c5085, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732554546931 2024-11-25T17:09:10,943 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a08b143bb834c7faa07b99cf9d31dbf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732554547561 2024-11-25T17:09:10,943 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3239456c0e1444ffbb9ce1ff627a9f7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554548741 2024-11-25T17:09:10,963 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
26bdcc7959673ac8abf209b84227d813#B#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:10,964 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/c8c0751bd37446db94b35a1059f79c1e is 50, key is test_row_0/B:col10/1732554549860/Put/seqid=0 2024-11-25T17:09:10,972 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/51ca08ad167547689d77f40b93d0f3df as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/51ca08ad167547689d77f40b93d0f3df 2024-11-25T17:09:10,982 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into 51ca08ad167547689d77f40b93d0f3df(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:10,982 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:10,982 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=13, startTime=1732554550457; duration=0sec 2024-11-25T17:09:10,982 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:10,982 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:11,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742126_1302 (size=13051) 2024-11-25T17:09:11,011 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/c8c0751bd37446db94b35a1059f79c1e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/c8c0751bd37446db94b35a1059f79c1e 2024-11-25T17:09:11,017 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:11,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-25T17:09:11,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): 
Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:11,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:11,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:11,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:11,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:11,018 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into c8c0751bd37446db94b35a1059f79c1e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
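Editor's note: the compaction entries above show ExploringCompactionPolicy selecting all 3 eligible files (~36.7 K total) for store B after examining a single permutation "with 1 in ratio". As a rough illustration of what that ratio test means, the standalone sketch below checks that no candidate file is larger than ratio x the combined size of the others. It is not HBase's actual ExploringCompactionPolicy; the filesInRatio helper, the 1.2 ratio, and the sample sizes are assumptions chosen only to approximate the files listed in the log.

import java.util.List;

// Illustration only: a simplified "files in ratio" test of the kind exploring-style
// compaction policies apply to a candidate set. Not HBase's real implementation.
public class RatioCheckSketch {

    // Every file must be no larger than ratio * (sum of the other files' sizes).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the selected candidate in the log (12.6 K + 12.0 K + 12.0 K).
        List<Long> candidate = List.of(12_900L, 12_300L, 12_300L);
        System.out.println("in ratio: " + filesInRatio(candidate, 1.2)); // prints: in ratio: true
    }
}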
2024-11-25T17:09:11,018 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:11,018 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=13, startTime=1732554550456; duration=0sec 2024-11-25T17:09:11,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
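Editor's note: the pid=84 failure above is expected noise in this test: the FlushRegionCallable finds the region "already flushing", reports the IOException back to the master, and the master re-dispatches the callable (it is retried and completes successfully further down in the log). From the client side the whole sequence is driven by one blocking admin call; a minimal sketch of that call is below, with the table name taken from the log and the configuration and error handling assumed for brevity.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call behind the FlushTableProcedure activity in this log.
// Admin.flush(TableName) blocks until the master reports the flush procedure done, which
// is why the log shows repeated "Checking to see if procedure is done" polls.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}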
2024-11-25T17:09:11,019 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:11,019 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:11,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/cd9abfb2353b40ac8d166fa94f5fe807 2024-11-25T17:09:11,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/2f851c6a22a44d5ba89341f39bec031f is 50, key is test_row_0/C:col10/1732554550504/Put/seqid=0 2024-11-25T17:09:11,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742127_1303 (size=12301) 2024-11-25T17:09:11,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/2f851c6a22a44d5ba89341f39bec031f 2024-11-25T17:09:11,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4b3a57f9f84b400d95d26268e98d8330 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b3a57f9f84b400d95d26268e98d8330 2024-11-25T17:09:11,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b3a57f9f84b400d95d26268e98d8330, entries=150, sequenceid=334, filesize=12.0 K 2024-11-25T17:09:11,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/cd9abfb2353b40ac8d166fa94f5fe807 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/cd9abfb2353b40ac8d166fa94f5fe807 2024-11-25T17:09:11,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/cd9abfb2353b40ac8d166fa94f5fe807, entries=150, sequenceid=334, filesize=12.0 K 2024-11-25T17:09:11,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/2f851c6a22a44d5ba89341f39bec031f as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/2f851c6a22a44d5ba89341f39bec031f 2024-11-25T17:09:11,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/2f851c6a22a44d5ba89341f39bec031f, entries=150, sequenceid=334, filesize=12.0 K 2024-11-25T17:09:11,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 26bdcc7959673ac8abf209b84227d813 in 608ms, sequenceid=334, compaction requested=false 2024-11-25T17:09:11,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:11,170 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:11,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-25T17:09:11,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:11,170 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-25T17:09:11,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:11,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:11,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:11,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:11,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:11,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-25T17:09:11,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3ab78f2a057b4e66bffbf19a62265469 is 50, key is test_row_0/A:col10/1732554550578/Put/seqid=0 2024-11-25T17:09:11,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:11,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:11,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742128_1304 (size=12301) 2024-11-25T17:09:11,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554611216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554611224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554611224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554611225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554611329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554611329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554611334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554611337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554611533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554611534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554611538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554611544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,624 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3ab78f2a057b4e66bffbf19a62265469 2024-11-25T17:09:11,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6a0339c591004201a6f0853e4dc8abb4 is 50, key is test_row_0/B:col10/1732554550578/Put/seqid=0 2024-11-25T17:09:11,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742129_1305 (size=12301) 2024-11-25T17:09:11,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554611838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554611838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554611846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:11,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:11,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554611850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,100 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6a0339c591004201a6f0853e4dc8abb4 2024-11-25T17:09:12,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3225429522644d8580722eca99f2ee0a is 50, key is test_row_0/C:col10/1732554550578/Put/seqid=0 2024-11-25T17:09:12,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742130_1306 (size=12301) 2024-11-25T17:09:12,114 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3225429522644d8580722eca99f2ee0a 2024-11-25T17:09:12,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3ab78f2a057b4e66bffbf19a62265469 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab78f2a057b4e66bffbf19a62265469 2024-11-25T17:09:12,126 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab78f2a057b4e66bffbf19a62265469, entries=150, sequenceid=358, filesize=12.0 K 2024-11-25T17:09:12,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/6a0339c591004201a6f0853e4dc8abb4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6a0339c591004201a6f0853e4dc8abb4 2024-11-25T17:09:12,130 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6a0339c591004201a6f0853e4dc8abb4, entries=150, sequenceid=358, filesize=12.0 K 2024-11-25T17:09:12,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/3225429522644d8580722eca99f2ee0a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3225429522644d8580722eca99f2ee0a 2024-11-25T17:09:12,136 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3225429522644d8580722eca99f2ee0a, entries=150, sequenceid=358, filesize=12.0 K 2024-11-25T17:09:12,137 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 26bdcc7959673ac8abf209b84227d813 in 966ms, sequenceid=358, compaction requested=true 2024-11-25T17:09:12,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:12,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
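Editor's note: the RegionTooBusyException warnings interleaved with the flush above are the region rejecting Mutate calls while its memstore sits over the blocking limit (512 K in this test configuration); the writers back off and retry until the flush frees space. The HBase client normally handles this internally, but a hand-rolled sketch of the same retry-with-backoff behaviour is shown below. The row and column names come from the log, the attempt cap and backoff values are arbitrary, and the exception is caught as IOException because the busy signal may surface wrapped by the client's own retry layer.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hand-rolled retry around a single Put, mirroring the back-off-and-retry behaviour the
// log shows clients performing when the server answers "Over memstore limit".
public class RetryOnTooBusySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            long backoffMs = 100;
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (IOException e) {
                    // In this log the root cause is RegionTooBusyException ("Over memstore limit");
                    // depending on client retry settings it may arrive wrapped.
                    if (++attempts > 10) {
                        throw e; // give up after a bounded number of attempts
                    }
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000); // capped exponential backoff
                }
            }
        }
    }
}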
2024-11-25T17:09:12,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-25T17:09:12,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-25T17:09:12,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-25T17:09:12,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0670 sec 2024-11-25T17:09:12,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 2.0770 sec 2024-11-25T17:09:12,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-25T17:09:12,179 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-25T17:09:12,180 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:12,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-25T17:09:12,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-25T17:09:12,182 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:12,182 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:12,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-25T17:09:12,334 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:12,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-25T17:09:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:12,335 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:09:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:12,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:12,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:12,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:12,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/00060384e84d4f4aa2f2c7eda50d4183 is 50, key is test_row_0/A:col10/1732554551219/Put/seqid=0 2024-11-25T17:09:12,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742131_1307 (size=12301) 2024-11-25T17:09:12,370 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/00060384e84d4f4aa2f2c7eda50d4183 2024-11-25T17:09:12,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554612382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554612385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554612387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554612387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/5b70caf07dd9492c97a3f38b925aed4b is 50, key is test_row_0/B:col10/1732554551219/Put/seqid=0 2024-11-25T17:09:12,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742132_1308 (size=12301) 2024-11-25T17:09:12,426 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/5b70caf07dd9492c97a3f38b925aed4b 2024-11-25T17:09:12,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/cece873f94e948df9fed4025591919c4 is 50, key is test_row_0/C:col10/1732554551219/Put/seqid=0 2024-11-25T17:09:12,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-25T17:09:12,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554612489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554612493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554612493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554612493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742133_1309 (size=12301) 2024-11-25T17:09:12,497 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/cece873f94e948df9fed4025591919c4 2024-11-25T17:09:12,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/00060384e84d4f4aa2f2c7eda50d4183 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/00060384e84d4f4aa2f2c7eda50d4183 2024-11-25T17:09:12,507 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/00060384e84d4f4aa2f2c7eda50d4183, entries=150, sequenceid=374, filesize=12.0 K 2024-11-25T17:09:12,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/5b70caf07dd9492c97a3f38b925aed4b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5b70caf07dd9492c97a3f38b925aed4b 2024-11-25T17:09:12,512 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5b70caf07dd9492c97a3f38b925aed4b, entries=150, sequenceid=374, filesize=12.0 K 2024-11-25T17:09:12,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/cece873f94e948df9fed4025591919c4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cece873f94e948df9fed4025591919c4 2024-11-25T17:09:12,517 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cece873f94e948df9fed4025591919c4, entries=150, sequenceid=374, filesize=12.0 K 2024-11-25T17:09:12,518 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 26bdcc7959673ac8abf209b84227d813 in 183ms, sequenceid=374, compaction requested=true 2024-11-25T17:09:12,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:12,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:12,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-25T17:09:12,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-25T17:09:12,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-25T17:09:12,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 338 msec 2024-11-25T17:09:12,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 342 msec 2024-11-25T17:09:12,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-25T17:09:12,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:12,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:12,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:12,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:12,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:12,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-25T17:09:12,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:12,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4b61b6a66c8744429a56041604c6ab79 is 50, key is test_row_0/A:col10/1732554552697/Put/seqid=0 2024-11-25T17:09:12,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554612726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554612728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554612728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554612729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742134_1310 (size=12301) 2024-11-25T17:09:12,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4b61b6a66c8744429a56041604c6ab79 2024-11-25T17:09:12,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/8cf894a7adbd4af5a827a9c4cdc46576 is 50, key is test_row_0/B:col10/1732554552697/Put/seqid=0 2024-11-25T17:09:12,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-25T17:09:12,790 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-25T17:09:12,791 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:12,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-25T17:09:12,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-25T17:09:12,793 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:12,794 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:12,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:12,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742135_1311 (size=12301) 2024-11-25T17:09:12,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/8cf894a7adbd4af5a827a9c4cdc46576 2024-11-25T17:09:12,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/a850fe2cc1044d5198e539e0e81357f5 is 50, key is test_row_0/C:col10/1732554552697/Put/seqid=0 2024-11-25T17:09:12,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554612833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554612834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554612835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554612836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742136_1312 (size=12301) 2024-11-25T17:09:12,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-25T17:09:12,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:12,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554612913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:12,916 DEBUG [Thread-1083 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., hostname=6579369734b6,41865,1732554474464, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:12,945 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:12,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:12,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:12,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:12,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:12,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:12,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554613039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554613041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554613041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554613047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:13,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:13,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:13,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:13,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-25T17:09:13,251 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:13,252 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,252 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
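The records above show the master repeatedly re-dispatching the flush procedure (pid=88) because the region keeps answering "NOT flushing ... as already flushing"; the FlushRegionCallable then fails with IOException and the master retries until the in-progress flush finishes. Below is a minimal sketch, assuming a locally reachable test cluster, of how such a table flush is requested through the public client API; the quorum setting and class name are illustrative assumptions, not values taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: quorum of the test cluster; not present in this log excerpt.
    conf.set("hbase.zookeeper.quorum", "localhost");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush() asks the master to run a flush procedure; the master then
      // dispatches FlushRegionCallable to the region server hosting each region
      // and retries while the region reports that a flush is already in progress.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}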
2024-11-25T17:09:13,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/a850fe2cc1044d5198e539e0e81357f5 2024-11-25T17:09:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/4b61b6a66c8744429a56041604c6ab79 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b61b6a66c8744429a56041604c6ab79 2024-11-25T17:09:13,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b61b6a66c8744429a56041604c6ab79, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:09:13,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/8cf894a7adbd4af5a827a9c4cdc46576 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8cf894a7adbd4af5a827a9c4cdc46576 2024-11-25T17:09:13,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8cf894a7adbd4af5a827a9c4cdc46576, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:09:13,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/a850fe2cc1044d5198e539e0e81357f5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a850fe2cc1044d5198e539e0e81357f5 2024-11-25T17:09:13,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a850fe2cc1044d5198e539e0e81357f5, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:09:13,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 26bdcc7959673ac8abf209b84227d813 in 600ms, sequenceid=395, compaction requested=true 2024-11-25T17:09:13,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:13,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:13,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:13,299 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-25T17:09:13,299 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-25T17:09:13,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:13,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:13,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:13,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:13,309 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62255 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-25T17:09:13,309 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:13,309 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:13,309 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/c8c0751bd37446db94b35a1059f79c1e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/cd9abfb2353b40ac8d166fa94f5fe807, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6a0339c591004201a6f0853e4dc8abb4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5b70caf07dd9492c97a3f38b925aed4b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8cf894a7adbd4af5a827a9c4cdc46576] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=60.8 K 2024-11-25T17:09:13,313 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62255 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-25T17:09:13,313 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:13,313 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
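The two ExploringCompactionPolicy records above report that five store files totaling 62255 bytes were selected after considering 6 permutations "with 6 in ratio". As a rough illustration only, and not the actual HBase implementation, the sketch below shows the kind of ratio test a candidate window has to pass; the file sizes approximate the 12.7 K + four 12.0 K files listed here, and the 1.2 ratio is an assumed, commonly used default.

import java.util.List;

public class RatioCheckSketch {
  // Illustrative only: a candidate window passes the ratio test when every file
  // is no larger than ratio * (sum of the other files in the window).
  static boolean inRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Assumed sizes roughly matching the ~60.8 K selection in the log:
    // one ~12.7 K file plus four ~12.0 K files (values are illustrative).
    List<Long> window = List.of(13004L, 12301L, 12301L, 12301L, 12301L);
    System.out.println(inRatio(window, 1.2)); // prints true for this window
  }
}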
2024-11-25T17:09:13,313 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6a36279ee2cb488db2851eca21378544, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b3a57f9f84b400d95d26268e98d8330, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab78f2a057b4e66bffbf19a62265469, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/00060384e84d4f4aa2f2c7eda50d4183, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b61b6a66c8744429a56041604c6ab79] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=60.8 K 2024-11-25T17:09:13,314 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a36279ee2cb488db2851eca21378544, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554548741 2024-11-25T17:09:13,314 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c8c0751bd37446db94b35a1059f79c1e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554548741 2024-11-25T17:09:13,314 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b3a57f9f84b400d95d26268e98d8330, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554549872 2024-11-25T17:09:13,315 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cd9abfb2353b40ac8d166fa94f5fe807, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554549872 2024-11-25T17:09:13,315 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a0339c591004201a6f0853e4dc8abb4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732554550575 2024-11-25T17:09:13,315 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ab78f2a057b4e66bffbf19a62265469, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732554550575 2024-11-25T17:09:13,316 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00060384e84d4f4aa2f2c7eda50d4183, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732554551208 2024-11-25T17:09:13,316 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b70caf07dd9492c97a3f38b925aed4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732554551208 2024-11-25T17:09:13,316 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b61b6a66c8744429a56041604c6ab79, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554552384 2024-11-25T17:09:13,316 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cf894a7adbd4af5a827a9c4cdc46576, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554552384 2024-11-25T17:09:13,342 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#268 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:13,343 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0b9bb4928bdb443ab926456eedea564a is 50, key is test_row_0/B:col10/1732554552697/Put/seqid=0 2024-11-25T17:09:13,352 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#269 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:13,352 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/ac247370ec3f43bd85983a7133c70a51 is 50, key is test_row_0/A:col10/1732554552697/Put/seqid=0 2024-11-25T17:09:13,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:13,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-25T17:09:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:13,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:13,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742137_1313 (size=13221) 2024-11-25T17:09:13,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742138_1314 (size=13221) 2024-11-25T17:09:13,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/cd53b69383c945cdb9caf91d01321fca is 50, key is test_row_0/A:col10/1732554553355/Put/seqid=0 2024-11-25T17:09:13,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554613395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,405 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:13,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:13,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:13,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
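Every RegionTooBusyException in this excerpt reports "Over memstore limit=512.0 K": HRegion.checkResources() rejects new writes once the region memstore exceeds its blocking size, which is the flush size multiplied by a blocking multiplier. The sketch below reproduces that arithmetic with the standard configuration keys; the 128 KB flush size and multiplier of 4 are assumptions chosen to match the 512 K limit seen here, not values read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test settings: a small flush size is typical for a test-scale run.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB (assumption)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocking multiplier (assumption)

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockingSize = flushSize * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" in the log.
    System.out.println("Blocking memstore size: " + (blockingSize / 1024) + " K");
  }
}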
2024-11-25T17:09:13,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554613395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-25T17:09:13,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554613402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554613409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742139_1315 (size=12301) 2024-11-25T17:09:13,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/cd53b69383c945cdb9caf91d01321fca 2024-11-25T17:09:13,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0818d59ab963425b83d00de3d27dc05c is 50, key is test_row_0/B:col10/1732554553355/Put/seqid=0 2024-11-25T17:09:13,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742140_1316 (size=12301) 2024-11-25T17:09:13,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0818d59ab963425b83d00de3d27dc05c 2024-11-25T17:09:13,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/eb7a0a198af5454e939f4a7ad4bede05 is 50, key is test_row_0/C:col10/1732554553355/Put/seqid=0 2024-11-25T17:09:13,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554613508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554613508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554613510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554613512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742141_1317 (size=12301) 2024-11-25T17:09:13,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:13,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:13,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:13,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
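The writes rejected above surface as RegionTooBusyException, a retriable IOException that the HBase client will normally retry on its own; whether it ever reaches application code depends on the client retry settings. Purely as a hedged sketch, the loop below retries a single Put with backoff while the region drains its memstore; the row, family, and qualifier mirror the test_row_0/A:col10 cells in this log, and the retry count and sleep times are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                        // write accepted
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) throw busy; // give up after a few tries (illustrative limit)
          Thread.sleep(100L * attempt); // back off while the flush drains the memstore
        }
      }
    }
  }
}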
2024-11-25T17:09:13,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:13,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554613714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554613714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554613722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:13,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554613722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:13,778 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0b9bb4928bdb443ab926456eedea564a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0b9bb4928bdb443ab926456eedea564a 2024-11-25T17:09:13,784 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into 0b9bb4928bdb443ab926456eedea564a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
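[Editor's note] The "Completed compaction of 5 (all) file(s) ... into 0b9bb4928bdb443ab926456eedea564a" entry above (and the store C selection logged just below) is a minor compaction rewriting five sorted store files into one new HFile. As a rough, self-contained sketch of the idea only (plain Java, no HBase types; not the actual Compactor, which also drops expired and deleted cells), merging several already-sorted inputs into one sorted output looks like this:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class CompactionMergeSketch {

    /** One entry in the merge heap: the current key plus the input it came from. */
    private static final class Cursor {
        String key;
        final Iterator<String> rest;
        Cursor(String key, Iterator<String> rest) { this.key = key; this.rest = rest; }
    }

    /** Merge several individually sorted inputs into one sorted output,
     *  which is the essence of rewriting N store files into a single compacted file. */
    static List<String> merge(List<List<String>> sortedFiles) {
        PriorityQueue<Cursor> heap =
            new PriorityQueue<>(Comparator.comparing((Cursor c) -> c.key));
        for (List<String> file : sortedFiles) {
            Iterator<String> it = file.iterator();
            if (it.hasNext()) {
                heap.add(new Cursor(it.next(), it));
            }
        }
        List<String> out = new ArrayList<>();
        while (!heap.isEmpty()) {
            Cursor c = heap.poll();
            out.add(c.key);            // emit the smallest current key across all inputs
            if (c.rest.hasNext()) {    // advance that input and put it back on the heap
                c.key = c.rest.next();
                heap.add(c);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        // Five tiny "store files", each sorted by row key, echoing the 5-file compactions in the log.
        List<List<String>> files = Arrays.asList(
            Arrays.asList("row_00", "row_05"),
            Arrays.asList("row_01", "row_06"),
            Arrays.asList("row_02", "row_07"),
            Arrays.asList("row_03", "row_08"),
            Arrays.asList("row_04", "row_09"));
        System.out.println(merge(files)); // one sorted output, like the single compacted HFile
    }
}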
2024-11-25T17:09:13,784 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:13,784 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=11, startTime=1732554553299; duration=0sec 2024-11-25T17:09:13,784 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:13,784 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:13,784 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-25T17:09:13,786 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62221 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-25T17:09:13,786 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:13,786 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,786 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/51ca08ad167547689d77f40b93d0f3df, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/2f851c6a22a44d5ba89341f39bec031f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3225429522644d8580722eca99f2ee0a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cece873f94e948df9fed4025591919c4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a850fe2cc1044d5198e539e0e81357f5] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=60.8 K 2024-11-25T17:09:13,786 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 51ca08ad167547689d77f40b93d0f3df, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554548741 2024-11-25T17:09:13,787 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f851c6a22a44d5ba89341f39bec031f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1732554549872 2024-11-25T17:09:13,787 DEBUG 
[RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3225429522644d8580722eca99f2ee0a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732554550575 2024-11-25T17:09:13,788 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cece873f94e948df9fed4025591919c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1732554551208 2024-11-25T17:09:13,789 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a850fe2cc1044d5198e539e0e81357f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554552384 2024-11-25T17:09:13,799 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/ac247370ec3f43bd85983a7133c70a51 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/ac247370ec3f43bd85983a7133c70a51 2024-11-25T17:09:13,804 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into ac247370ec3f43bd85983a7133c70a51(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:13,804 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:13,804 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=11, startTime=1732554553299; duration=0sec 2024-11-25T17:09:13,804 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:13,804 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:13,809 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#273 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:13,810 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/a80493a9d10a41cfaf401fe3271951a3 is 50, key is test_row_0/C:col10/1732554552697/Put/seqid=0 2024-11-25T17:09:13,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:13,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:13,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742142_1318 (size=13187) 2024-11-25T17:09:13,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:13,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:13,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:13,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
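[Editor's note] pid=88 keeps failing with "Unable to complete flush ... as already flushing": each dispatched FlushRegionCallable finds a flush already in progress, reports the failure, and the master re-dispatches the procedure until the region can actually flush. Requests like this can also be issued through the public Admin API; a minimal, hedged sketch (table name taken from the log, everything else illustrative and not asserted to be how this particular procedure was started) would be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the cluster to flush all regions of the table. In recent HBase versions
            // this is driven by a master-side flush procedure that dispatches
            // FlushRegionCallable to the region servers, much like pid=88 above appears to be.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}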
2024-11-25T17:09:13,874 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/a80493a9d10a41cfaf401fe3271951a3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a80493a9d10a41cfaf401fe3271951a3 2024-11-25T17:09:13,880 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into a80493a9d10a41cfaf401fe3271951a3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:13,880 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:13,880 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=11, startTime=1732554553300; duration=0sec 2024-11-25T17:09:13,880 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:13,880 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-25T17:09:13,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/eb7a0a198af5454e939f4a7ad4bede05 2024-11-25T17:09:13,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/cd53b69383c945cdb9caf91d01321fca as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/cd53b69383c945cdb9caf91d01321fca 2024-11-25T17:09:13,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/cd53b69383c945cdb9caf91d01321fca, entries=150, sequenceid=413, filesize=12.0 K 2024-11-25T17:09:13,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0818d59ab963425b83d00de3d27dc05c as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0818d59ab963425b83d00de3d27dc05c 2024-11-25T17:09:13,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0818d59ab963425b83d00de3d27dc05c, entries=150, sequenceid=413, filesize=12.0 K 2024-11-25T17:09:13,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/eb7a0a198af5454e939f4a7ad4bede05 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/eb7a0a198af5454e939f4a7ad4bede05 2024-11-25T17:09:13,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/eb7a0a198af5454e939f4a7ad4bede05, entries=150, sequenceid=413, filesize=12.0 K 2024-11-25T17:09:13,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 26bdcc7959673ac8abf209b84227d813 in 608ms, sequenceid=413, compaction requested=false 2024-11-25T17:09:13,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:14,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:14,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-25T17:09:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
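[Editor's note] The MemStoreFlusher entries above show the flush's two-step commit: each new store file is first written under the region's .tmp directory and only then moved into its column-family directory (A, B, C), at which point it is reported with "Added ... entries=150, sequenceid=413". A minimal sketch of that write-to-temp-then-rename pattern using the plain Hadoop FileSystem API (all paths and file names below are made up; this is not HStore's actual commit code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf); // local FS unless fs.defaultFS points at HDFS

        // Hypothetical layout mirroring the log's <region>/.tmp/<family>/<file> structure.
        Path tmpFile   = new Path("/tmp/region-demo/.tmp/A/examplehfile0001");
        Path finalFile = new Path("/tmp/region-demo/A/examplehfile0001");

        // Step 1: write the complete file under .tmp, where readers never look.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("flushed cells would go here");
        }

        // Step 2: "commit" by renaming into the store directory; the file only becomes
        // visible to scans once it sits at its final path.
        fs.mkdirs(finalFile.getParent());
        boolean committed = fs.rename(tmpFile, finalFile);
        System.out.println("committed=" + committed);
    }
}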
2024-11-25T17:09:14,020 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-25T17:09:14,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:14,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:14,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:14,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:14,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:14,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:14,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/f2833defe9654370912c10add3329beb is 50, key is test_row_0/A:col10/1732554553394/Put/seqid=0 2024-11-25T17:09:14,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
as already flushing 2024-11-25T17:09:14,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:14,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742143_1319 (size=12301) 2024-11-25T17:09:14,051 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/f2833defe9654370912c10add3329beb 2024-11-25T17:09:14,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/da8c0edb7f724a93a11a30587c2cd708 is 50, key is test_row_0/B:col10/1732554553394/Put/seqid=0 2024-11-25T17:09:14,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742144_1320 (size=12301) 2024-11-25T17:09:14,094 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/da8c0edb7f724a93a11a30587c2cd708 2024-11-25T17:09:14,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554614104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554614106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554614106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554614107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/5af914333a5b4be09008666ad5bab2af is 50, key is test_row_0/C:col10/1732554553394/Put/seqid=0 2024-11-25T17:09:14,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742145_1321 (size=12301) 2024-11-25T17:09:14,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554614210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554614212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554614212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554614212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554614416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554614417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554614418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554614418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,556 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/5af914333a5b4be09008666ad5bab2af 2024-11-25T17:09:14,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/f2833defe9654370912c10add3329beb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/f2833defe9654370912c10add3329beb 2024-11-25T17:09:14,568 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/f2833defe9654370912c10add3329beb, entries=150, sequenceid=434, filesize=12.0 K 2024-11-25T17:09:14,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/da8c0edb7f724a93a11a30587c2cd708 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/da8c0edb7f724a93a11a30587c2cd708 2024-11-25T17:09:14,585 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/da8c0edb7f724a93a11a30587c2cd708, entries=150, sequenceid=434, filesize=12.0 K 2024-11-25T17:09:14,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/5af914333a5b4be09008666ad5bab2af as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5af914333a5b4be09008666ad5bab2af 2024-11-25T17:09:14,605 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5af914333a5b4be09008666ad5bab2af, entries=150, sequenceid=434, filesize=12.0 K 2024-11-25T17:09:14,606 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 26bdcc7959673ac8abf209b84227d813 in 586ms, sequenceid=434, compaction requested=true 2024-11-25T17:09:14,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:14,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:14,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-25T17:09:14,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-25T17:09:14,609 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-25T17:09:14,609 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8130 sec 2024-11-25T17:09:14,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.8190 sec 2024-11-25T17:09:14,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-25T17:09:14,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:14,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:14,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:14,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:14,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:14,725 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:14,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:14,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/51da79a7305c481eb0cb6e02e6e611f5 is 50, key is test_row_0/A:col10/1732554554104/Put/seqid=0 2024-11-25T17:09:14,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554614771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554614772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554614773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554614776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742146_1322 (size=12301) 2024-11-25T17:09:14,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/51da79a7305c481eb0cb6e02e6e611f5 2024-11-25T17:09:14,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/5ba6612a50d448ccbfb2c5908575d471 is 50, key is test_row_0/B:col10/1732554554104/Put/seqid=0 2024-11-25T17:09:14,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742147_1323 (size=12301) 2024-11-25T17:09:14,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554614878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554614881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554614882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,886 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:14,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554614882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:14,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-25T17:09:14,908 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-25T17:09:14,913 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:14,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-25T17:09:14,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-25T17:09:14,915 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:14,916 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:14,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:15,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-25T17:09:15,067 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:15,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-25T17:09:15,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:15,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:15,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:15,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554615090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554615093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554615093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554615098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-25T17:09:15,221 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:15,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-25T17:09:15,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:15,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:15,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/5ba6612a50d448ccbfb2c5908575d471 2024-11-25T17:09:15,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/d6cdf6321f664397a9e44a05f7708e14 is 50, key is test_row_0/C:col10/1732554554104/Put/seqid=0 2024-11-25T17:09:15,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742148_1324 (size=12301) 2024-11-25T17:09:15,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:15,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-25T17:09:15,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:15,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:15,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554615398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554615399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554615401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554615409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-25T17:09:15,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:15,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-25T17:09:15,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:15,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,702 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:15,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-25T17:09:15,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:15,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,703 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:15,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
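Note (illustrative sketch): the RegionTooBusyException warnings ("Over memstore limit=512.0 K") are thrown back to writers, which are expected to back off and retry while the flush and compactions catch up. A minimal client-side sketch of such a retry loop follows, assuming a plain Table handle, a hand-rolled exponential backoff, and a row layout mirroring the log's test_row_0/A:col10; the attempt count, sleep times, and cell value are made-up illustration values.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    // Hedged sketch: retries a single Put with exponential backoff when the region
    // reports it is over its memstore limit.
    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; attempt <= 5; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException busy) {
                if (attempt == 5) {
                    throw busy; // give up after the final attempt
                }
                Thread.sleep(sleepMs);
                sleepMs *= 2; // back off before the next attempt
            }
        }
    }

    static Put exampleRow() {
        // Column family/qualifier mirror the log's A/col10 layout; the value is arbitrary.
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        return put;
    }
}

In practice the stock HBase client already retries such exceptions internally (governed by settings like hbase.client.retries.number and hbase.client.pause), so a hand-written loop like this is only for illustration.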
2024-11-25T17:09:15,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/d6cdf6321f664397a9e44a05f7708e14 2024-11-25T17:09:15,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/51da79a7305c481eb0cb6e02e6e611f5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/51da79a7305c481eb0cb6e02e6e611f5 2024-11-25T17:09:15,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/51da79a7305c481eb0cb6e02e6e611f5, entries=150, sequenceid=454, filesize=12.0 K 2024-11-25T17:09:15,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/5ba6612a50d448ccbfb2c5908575d471 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5ba6612a50d448ccbfb2c5908575d471 2024-11-25T17:09:15,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5ba6612a50d448ccbfb2c5908575d471, entries=150, sequenceid=454, filesize=12.0 K 2024-11-25T17:09:15,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/d6cdf6321f664397a9e44a05f7708e14 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d6cdf6321f664397a9e44a05f7708e14 2024-11-25T17:09:15,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d6cdf6321f664397a9e44a05f7708e14, entries=150, sequenceid=454, filesize=12.0 K 2024-11-25T17:09:15,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 26bdcc7959673ac8abf209b84227d813 in 1087ms, sequenceid=454, compaction requested=true 2024-11-25T17:09:15,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:15,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:15,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:15,811 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:15,811 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:15,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:15,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:15,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:15,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:15,813 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:15,813 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 2024-11-25T17:09:15,813 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
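Note (illustrative sketch): the "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" lines come from the store-file selection of the exploring compaction policy. A hedged sketch of the configuration keys behind those numbers follows; the values shown are the usual HBase defaults that happen to match the log (16 blocking store files, 1.2 ratio), not settings confirmed from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static Configuration compactionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // A store with this many files blocks further flushes ("16 blocking" in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Ratio used by the exploring policy when deciding which files are "in ratio".
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        return conf;
    }
}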
2024-11-25T17:09:15,814 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:15,814 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0b9bb4928bdb443ab926456eedea564a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0818d59ab963425b83d00de3d27dc05c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/da8c0edb7f724a93a11a30587c2cd708, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5ba6612a50d448ccbfb2c5908575d471] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=48.9 K 2024-11-25T17:09:15,814 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:15,814 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,814 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/ac247370ec3f43bd85983a7133c70a51, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/cd53b69383c945cdb9caf91d01321fca, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/f2833defe9654370912c10add3329beb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/51da79a7305c481eb0cb6e02e6e611f5] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=48.9 K 2024-11-25T17:09:15,814 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b9bb4928bdb443ab926456eedea564a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554552384 2024-11-25T17:09:15,814 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac247370ec3f43bd85983a7133c70a51, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554552384 2024-11-25T17:09:15,814 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0818d59ab963425b83d00de3d27dc05c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, 
earliestPutTs=1732554552727 2024-11-25T17:09:15,815 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd53b69383c945cdb9caf91d01321fca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732554552727 2024-11-25T17:09:15,815 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting da8c0edb7f724a93a11a30587c2cd708, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732554553389 2024-11-25T17:09:15,815 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ba6612a50d448ccbfb2c5908575d471, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732554554104 2024-11-25T17:09:15,815 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2833defe9654370912c10add3329beb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732554553389 2024-11-25T17:09:15,816 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51da79a7305c481eb0cb6e02e6e611f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732554554104 2024-11-25T17:09:15,826 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:15,826 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/180b5e9dfd24402f928fd606ecc0b1b8 is 50, key is test_row_0/B:col10/1732554554104/Put/seqid=0 2024-11-25T17:09:15,829 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#281 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:15,829 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/309888863d22411086f91aaf4280a578 is 50, key is test_row_0/A:col10/1732554554104/Put/seqid=0 2024-11-25T17:09:15,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742149_1325 (size=13357) 2024-11-25T17:09:15,855 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:15,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-25T17:09:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:15,858 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:09:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:15,860 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/180b5e9dfd24402f928fd606ecc0b1b8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/180b5e9dfd24402f928fd606ecc0b1b8 2024-11-25T17:09:15,871 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into 180b5e9dfd24402f928fd606ecc0b1b8(size=13.0 K), 
total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:15,871 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:15,871 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=12, startTime=1732554555811; duration=0sec 2024-11-25T17:09:15,871 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:15,871 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:15,871 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:15,873 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:15,873 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:15,873 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:15,873 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a80493a9d10a41cfaf401fe3271951a3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/eb7a0a198af5454e939f4a7ad4bede05, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5af914333a5b4be09008666ad5bab2af, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d6cdf6321f664397a9e44a05f7708e14] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=48.9 K 2024-11-25T17:09:15,874 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a80493a9d10a41cfaf401fe3271951a3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554552384 2024-11-25T17:09:15,874 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting eb7a0a198af5454e939f4a7ad4bede05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732554552727 2024-11-25T17:09:15,874 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5af914333a5b4be09008666ad5bab2af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732554553389 2024-11-25T17:09:15,875 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d6cdf6321f664397a9e44a05f7708e14, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732554554104 2024-11-25T17:09:15,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
as already flushing 2024-11-25T17:09:15,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:15,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3bb4d388007f4b979b7b55ac0941e970 is 50, key is test_row_0/A:col10/1732554554774/Put/seqid=0 2024-11-25T17:09:15,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742150_1326 (size=13357) 2024-11-25T17:09:15,922 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/309888863d22411086f91aaf4280a578 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/309888863d22411086f91aaf4280a578 2024-11-25T17:09:15,924 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#283 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:15,925 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/f723f4fb68974d7f82124ecea5f0b71a is 50, key is test_row_0/C:col10/1732554554104/Put/seqid=0 2024-11-25T17:09:15,933 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 309888863d22411086f91aaf4280a578(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
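Note (illustrative sketch): the 512.0 K limit in the RegionTooBusyException messages is the per-region blocking threshold, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; HRegion.checkResources() rejects writes once the memstore exceeds it. A hedged sketch of a configuration that would produce that limit follows; the 128 KB flush size and multiplier of 4 are assumptions chosen only to reproduce 512 K, not values read from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static Configuration smallMemstoreConfig() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // ...and block new writes (RegionTooBusyException) at flush.size * multiplier = 512 KB.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}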
2024-11-25T17:09:15,933 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:15,933 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=12, startTime=1732554555811; duration=0sec 2024-11-25T17:09:15,933 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:15,933 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:15,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742151_1327 (size=12301) 2024-11-25T17:09:15,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742152_1328 (size=13323) 2024-11-25T17:09:15,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554615970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554615969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554615974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:15,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554615973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:15,986 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/f723f4fb68974d7f82124ecea5f0b71a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f723f4fb68974d7f82124ecea5f0b71a 2024-11-25T17:09:15,992 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into f723f4fb68974d7f82124ecea5f0b71a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:15,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:15,992 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=12, startTime=1732554555812; duration=0sec 2024-11-25T17:09:15,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:15,992 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:16,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-25T17:09:16,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554616079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554616085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554616085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554616086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554616283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554616288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554616288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554616305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,353 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=470 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3bb4d388007f4b979b7b55ac0941e970 2024-11-25T17:09:16,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/fbe45a903acc41ddaf09a5deed31fa14 is 50, key is test_row_0/B:col10/1732554554774/Put/seqid=0 2024-11-25T17:09:16,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742153_1329 (size=12301) 2024-11-25T17:09:16,402 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=470 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/fbe45a903acc41ddaf09a5deed31fa14 2024-11-25T17:09:16,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/313e9aa000ee43219d4da2cd41845dc0 is 50, key is 
test_row_0/C:col10/1732554554774/Put/seqid=0 2024-11-25T17:09:16,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742154_1330 (size=12301) 2024-11-25T17:09:16,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554616586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554616594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554616599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554616611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,853 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=470 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/313e9aa000ee43219d4da2cd41845dc0 2024-11-25T17:09:16,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/3bb4d388007f4b979b7b55ac0941e970 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3bb4d388007f4b979b7b55ac0941e970 2024-11-25T17:09:16,864 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3bb4d388007f4b979b7b55ac0941e970, entries=150, sequenceid=470, filesize=12.0 K 2024-11-25T17:09:16,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/fbe45a903acc41ddaf09a5deed31fa14 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fbe45a903acc41ddaf09a5deed31fa14 2024-11-25T17:09:16,874 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fbe45a903acc41ddaf09a5deed31fa14, entries=150, sequenceid=470, filesize=12.0 K 2024-11-25T17:09:16,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/313e9aa000ee43219d4da2cd41845dc0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/313e9aa000ee43219d4da2cd41845dc0 2024-11-25T17:09:16,879 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/313e9aa000ee43219d4da2cd41845dc0, entries=150, sequenceid=470, filesize=12.0 K 2024-11-25T17:09:16,880 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 26bdcc7959673ac8abf209b84227d813 in 1022ms, sequenceid=470, compaction requested=false 2024-11-25T17:09:16,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:16,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:16,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-25T17:09:16,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-25T17:09:16,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-25T17:09:16,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9730 sec 2024-11-25T17:09:16,893 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.9790 sec 2024-11-25T17:09:16,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:16,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-25T17:09:16,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:16,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:16,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:16,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:16,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:16,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:16,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/a28e7227c3f044c5bc46bc2aa3cc0660 is 50, key is test_row_0/A:col10/1732554556926/Put/seqid=0 2024-11-25T17:09:16,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:16,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554616975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742155_1331 (size=12301) 2024-11-25T17:09:17,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-25T17:09:17,019 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-25T17:09:17,021 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-25T17:09:17,023 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:17,023 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:17,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-25T17:09:17,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:17,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:17,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554617077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:17,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:17,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554617090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:17,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:17,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554617098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:17,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:17,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554617101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:17,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554617119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:17,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-25T17:09:17,175 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:17,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:17,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,196 DEBUG [Thread-1092 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72537a47 to 127.0.0.1:56265 2024-11-25T17:09:17,196 DEBUG [Thread-1092 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:17,197 DEBUG [Thread-1090 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:56265 2024-11-25T17:09:17,197 DEBUG [Thread-1090 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:17,198 DEBUG [Thread-1094 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:56265 2024-11-25T17:09:17,198 DEBUG [Thread-1094 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:17,199 DEBUG [Thread-1088 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03a703d2 to 127.0.0.1:56265 2024-11-25T17:09:17,199 DEBUG [Thread-1088 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:17,201 DEBUG [Thread-1096 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:56265 2024-11-25T17:09:17,201 DEBUG [Thread-1096 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:17,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:17,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554617281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:17,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-25T17:09:17,332 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:17,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:17,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:17,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:17,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/a28e7227c3f044c5bc46bc2aa3cc0660 2024-11-25T17:09:17,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/82db3eea1e3e4f94a330937df1b8f256 is 50, key is test_row_0/B:col10/1732554556926/Put/seqid=0 2024-11-25T17:09:17,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742156_1332 (size=12301) 2024-11-25T17:09:17,485 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:17,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:17,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:17,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:17,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:17,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554617585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:17,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-25T17:09:17,637 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:17,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:17,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:17,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:17,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:17,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:17,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:17,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/82db3eea1e3e4f94a330937df1b8f256 2024-11-25T17:09:17,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/6a28f416e3aa4abe89b8164451b2e07b is 50, key is test_row_0/C:col10/1732554556926/Put/seqid=0 2024-11-25T17:09:17,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742157_1333 (size=12301) 2024-11-25T17:09:17,942 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:17,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:17,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
as already flushing 2024-11-25T17:09:17,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:17,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:17,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:18,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:18,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36530 deadline: 1732554618087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:18,095 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:18,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:18,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:18,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:18,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:18,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:18,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:18,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:18,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:18,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36544 deadline: 1732554618097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:18,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:18,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36540 deadline: 1732554618108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:18,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:18,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36572 deadline: 1732554618110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:18,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:18,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:36594 deadline: 1732554618127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:18,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-25T17:09:18,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/6a28f416e3aa4abe89b8164451b2e07b 2024-11-25T17:09:18,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/a28e7227c3f044c5bc46bc2aa3cc0660 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/a28e7227c3f044c5bc46bc2aa3cc0660 2024-11-25T17:09:18,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/a28e7227c3f044c5bc46bc2aa3cc0660, entries=150, sequenceid=494, filesize=12.0 K 2024-11-25T17:09:18,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/82db3eea1e3e4f94a330937df1b8f256 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/82db3eea1e3e4f94a330937df1b8f256 2024-11-25T17:09:18,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/82db3eea1e3e4f94a330937df1b8f256, entries=150, sequenceid=494, filesize=12.0 K 2024-11-25T17:09:18,228 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/6a28f416e3aa4abe89b8164451b2e07b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/6a28f416e3aa4abe89b8164451b2e07b 2024-11-25T17:09:18,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/6a28f416e3aa4abe89b8164451b2e07b, entries=150, sequenceid=494, filesize=12.0 K 2024-11-25T17:09:18,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 26bdcc7959673ac8abf209b84227d813 in 1304ms, sequenceid=494, compaction requested=true 2024-11-25T17:09:18,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 26bdcc7959673ac8abf209b84227d813:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:18,232 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:18,232 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:18,232 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:18,232 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:18,232 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/B is initiating minor compaction (all files) 
2024-11-25T17:09:18,232 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/A is initiating minor compaction (all files) 2024-11-25T17:09:18,233 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/B in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:18,233 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/A in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:18,233 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/180b5e9dfd24402f928fd606ecc0b1b8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fbe45a903acc41ddaf09a5deed31fa14, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/82db3eea1e3e4f94a330937df1b8f256] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=37.1 K 2024-11-25T17:09:18,233 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/309888863d22411086f91aaf4280a578, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3bb4d388007f4b979b7b55ac0941e970, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/a28e7227c3f044c5bc46bc2aa3cc0660] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=37.1 K 2024-11-25T17:09:18,233 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 180b5e9dfd24402f928fd606ecc0b1b8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732554554104 2024-11-25T17:09:18,233 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 309888863d22411086f91aaf4280a578, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732554554104 2024-11-25T17:09:18,233 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting fbe45a903acc41ddaf09a5deed31fa14, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=470, earliestPutTs=1732554554727 2024-11-25T17:09:18,233 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bb4d388007f4b979b7b55ac0941e970, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=470, earliestPutTs=1732554554727 2024-11-25T17:09:18,234 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 
82db3eea1e3e4f94a330937df1b8f256, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732554555930 2024-11-25T17:09:18,234 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a28e7227c3f044c5bc46bc2aa3cc0660, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732554555930 2024-11-25T17:09:18,239 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#B#compaction#289 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:18,240 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/fa7a8e20e5f64d85a11aef38ba4f187f is 50, key is test_row_0/B:col10/1732554556926/Put/seqid=0 2024-11-25T17:09:18,242 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#A#compaction#290 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:18,243 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/04650d5f184f459ba80c981f3c7643a1 is 50, key is test_row_0/A:col10/1732554556926/Put/seqid=0 2024-11-25T17:09:18,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742158_1334 (size=13459) 2024-11-25T17:09:18,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742159_1335 (size=13459) 2024-11-25T17:09:18,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:18,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-25T17:09:18,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
2024-11-25T17:09:18,248 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-25T17:09:18,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:18,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:18,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:18,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:18,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:18,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:18,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/353921ee2e9a4322929cad161908a4b1 is 50, key is test_row_0/A:col10/1732554556956/Put/seqid=0 2024-11-25T17:09:18,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742160_1336 (size=12301) 2024-11-25T17:09:18,254 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/353921ee2e9a4322929cad161908a4b1 2024-11-25T17:09:18,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0c61ce85d1184088a3842bbde3883977 is 50, key is test_row_0/B:col10/1732554556956/Put/seqid=0 2024-11-25T17:09:18,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742161_1337 (size=12301) 2024-11-25T17:09:18,649 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/fa7a8e20e5f64d85a11aef38ba4f187f as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fa7a8e20e5f64d85a11aef38ba4f187f 2024-11-25T17:09:18,650 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/04650d5f184f459ba80c981f3c7643a1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/04650d5f184f459ba80c981f3c7643a1 2024-11-25T17:09:18,654 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/A of 26bdcc7959673ac8abf209b84227d813 into 04650d5f184f459ba80c981f3c7643a1(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:18,654 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/B of 26bdcc7959673ac8abf209b84227d813 into fa7a8e20e5f64d85a11aef38ba4f187f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:18,654 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/A, priority=13, startTime=1732554558232; duration=0sec 2024-11-25T17:09:18,654 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/B, priority=13, startTime=1732554558232; duration=0sec 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:A 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:B 2024-11-25T17:09:18,654 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm 
has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:18,655 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 26bdcc7959673ac8abf209b84227d813/C is initiating minor compaction (all files) 2024-11-25T17:09:18,655 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 26bdcc7959673ac8abf209b84227d813/C in TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:18,655 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f723f4fb68974d7f82124ecea5f0b71a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/313e9aa000ee43219d4da2cd41845dc0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/6a28f416e3aa4abe89b8164451b2e07b] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp, totalSize=37.0 K 2024-11-25T17:09:18,655 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f723f4fb68974d7f82124ecea5f0b71a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=454, earliestPutTs=1732554554104 2024-11-25T17:09:18,655 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 313e9aa000ee43219d4da2cd41845dc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=470, earliestPutTs=1732554554727 2024-11-25T17:09:18,655 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a28f416e3aa4abe89b8164451b2e07b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732554555930 2024-11-25T17:09:18,661 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 26bdcc7959673ac8abf209b84227d813#C#compaction#293 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:18,661 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/9270067ab5dc46c0927f5ed9b834d795 is 50, key is test_row_0/C:col10/1732554556926/Put/seqid=0 2024-11-25T17:09:18,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742162_1338 (size=13425) 2024-11-25T17:09:18,666 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0c61ce85d1184088a3842bbde3883977 2024-11-25T17:09:18,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/45c1fe346e8e415eb36d1c7aa9a55545 is 50, key is test_row_0/C:col10/1732554556956/Put/seqid=0 2024-11-25T17:09:18,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742163_1339 (size=12301) 2024-11-25T17:09:19,069 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/9270067ab5dc46c0927f5ed9b834d795 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/9270067ab5dc46c0927f5ed9b834d795 2024-11-25T17:09:19,074 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 26bdcc7959673ac8abf209b84227d813/C of 26bdcc7959673ac8abf209b84227d813 into 9270067ab5dc46c0927f5ed9b834d795(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
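The minor compactions recorded above (three HFiles per store merged into a single ~13.1 K file per family) were selected automatically by ExploringCompactionPolicy on the region server. For reference, an equivalent compaction can also be requested from client code through the public Admin API. This is a minimal sketch under the assumption of a running cluster reachable via hbase-site.xml; the class name RequestCompaction and the choice of family "A" are illustrative, not taken from the test tool itself:

    // Sketch only: request a compaction of one column family of the test table.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();        // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          // Queue a (minor) compaction for family A; the server-side policy still
          // decides which files are actually merged, as seen in the log above.
          admin.compact(tn, Bytes.toBytes("A"));
          // admin.majorCompact(tn) would instead request a major compaction of all families.
        }
      }
    }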
2024-11-25T17:09:19,074 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:19,074 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813., storeName=26bdcc7959673ac8abf209b84227d813/C, priority=13, startTime=1732554558232; duration=0sec 2024-11-25T17:09:19,074 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:19,074 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 26bdcc7959673ac8abf209b84227d813:C 2024-11-25T17:09:19,087 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=509 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/45c1fe346e8e415eb36d1c7aa9a55545 2024-11-25T17:09:19,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/353921ee2e9a4322929cad161908a4b1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/353921ee2e9a4322929cad161908a4b1 2024-11-25T17:09:19,094 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/353921ee2e9a4322929cad161908a4b1, entries=150, sequenceid=509, filesize=12.0 K 2024-11-25T17:09:19,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/0c61ce85d1184088a3842bbde3883977 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0c61ce85d1184088a3842bbde3883977 2024-11-25T17:09:19,098 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0c61ce85d1184088a3842bbde3883977, entries=150, sequenceid=509, filesize=12.0 K 2024-11-25T17:09:19,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/45c1fe346e8e415eb36d1c7aa9a55545 
as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/45c1fe346e8e415eb36d1c7aa9a55545 2024-11-25T17:09:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:19,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. as already flushing 2024-11-25T17:09:19,099 DEBUG [Thread-1083 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:56265 2024-11-25T17:09:19,099 DEBUG [Thread-1083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:19,102 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/45c1fe346e8e415eb36d1c7aa9a55545, entries=150, sequenceid=509, filesize=12.0 K 2024-11-25T17:09:19,103 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=6.71 KB/6870 for 26bdcc7959673ac8abf209b84227d813 in 854ms, sequenceid=509, compaction requested=false 2024-11-25T17:09:19,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:19,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
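The flush whose store files are committed above was driven by a client-side flush request (procedure 91, reported completed just below). As a minimal sketch, a table-wide flush like this can be issued through the public Admin API, assuming the same cluster configuration; FlushTable is an illustrative class name. The client side then polls the master until the procedure finishes, which is what the repeated "Checking to see if procedure is done pid=91" lines correspond to:

    // Sketch only: ask the master to flush all regions of the test table.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush the memstores of every region of the table; the call returns
          // once the master-side flush procedure has completed.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }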
2024-11-25T17:09:19,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-25T17:09:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-25T17:09:19,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-25T17:09:19,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0810 sec 2024-11-25T17:09:19,106 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 2.0840 sec 2024-11-25T17:09:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-25T17:09:19,130 INFO [Thread-1087 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-25T17:09:20,103 DEBUG [Thread-1077 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64dc42d9 to 127.0.0.1:56265 2024-11-25T17:09:20,103 DEBUG [Thread-1077 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:20,112 DEBUG [Thread-1081 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:56265 2024-11-25T17:09:20,112 DEBUG [Thread-1081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:20,112 DEBUG [Thread-1085 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:56265 2024-11-25T17:09:20,112 DEBUG [Thread-1085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:20,136 DEBUG [Thread-1079 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1ac389 to 127.0.0.1:56265 2024-11-25T17:09:20,136 DEBUG [Thread-1079 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4506 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4366 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4331 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4508 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4367 2024-11-25T17:09:20,137 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-25T17:09:20,137 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-25T17:09:20,137 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f810aa9 to 127.0.0.1:56265 2024-11-25T17:09:20,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:20,138 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-25T17:09:20,138 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-25T17:09:20,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:20,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-25T17:09:20,141 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554560141"}]},"ts":"1732554560141"} 2024-11-25T17:09:20,142 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-25T17:09:20,144 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-25T17:09:20,144 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-25T17:09:20,145 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=26bdcc7959673ac8abf209b84227d813, UNASSIGN}] 2024-11-25T17:09:20,146 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=26bdcc7959673ac8abf209b84227d813, UNASSIGN 2024-11-25T17:09:20,146 INFO [PEWorker-2 {}] 
assignment.RegionStateStore(202): pid=95 updating hbase:meta row=26bdcc7959673ac8abf209b84227d813, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:20,147 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:09:20,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:09:20,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-25T17:09:20,298 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:20,298 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:20,298 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 26bdcc7959673ac8abf209b84227d813, disabling compactions & flushes 2024-11-25T17:09:20,299 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. after waiting 0 ms 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 
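The region close below is the downstream effect of the DisableTableProcedure started above (pid 93): the master transitions the region to CLOSING and the region server flushes the remaining memstore before releasing it. A minimal sketch of the corresponding client call, assuming the same cluster configuration; DisableAndDrop is an illustrative class name, and the commented-out deleteTable is a typical follow-up cleanup step in tests rather than something shown in this log excerpt:

    // Sketch only: disable the test table, which triggers the region close seen below.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableAndDrop {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);   // runs DisableTableProcedure; regions are unassigned and closed
          }
          // admin.deleteTable(tn);   // optional cleanup once the table is disabled
        }
      }
    }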
2024-11-25T17:09:20,299 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing 26bdcc7959673ac8abf209b84227d813 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=A 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=B 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 26bdcc7959673ac8abf209b84227d813, store=C 2024-11-25T17:09:20,299 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:20,306 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/8f668fca3a374fd19231ec0ec15c10b7 is 50, key is test_row_0/A:col10/1732554560135/Put/seqid=0 2024-11-25T17:09:20,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742164_1340 (size=12301) 2024-11-25T17:09:20,311 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/8f668fca3a374fd19231ec0ec15c10b7 2024-11-25T17:09:20,317 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/9b179c0b277845919c7499e19ad5dbf1 is 50, key is test_row_0/B:col10/1732554560135/Put/seqid=0 2024-11-25T17:09:20,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742165_1341 (size=12301) 2024-11-25T17:09:20,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-25T17:09:20,727 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=520 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/9b179c0b277845919c7499e19ad5dbf1 2024-11-25T17:09:20,734 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/cd8c6e260c4141fbbcbc9beab1b81876 is 50, key is test_row_0/C:col10/1732554560135/Put/seqid=0 2024-11-25T17:09:20,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742166_1342 (size=12301) 2024-11-25T17:09:20,741 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=520 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/cd8c6e260c4141fbbcbc9beab1b81876 2024-11-25T17:09:20,744 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/A/8f668fca3a374fd19231ec0ec15c10b7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8f668fca3a374fd19231ec0ec15c10b7 2024-11-25T17:09:20,747 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8f668fca3a374fd19231ec0ec15c10b7, entries=150, sequenceid=520, filesize=12.0 K 2024-11-25T17:09:20,747 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/B/9b179c0b277845919c7499e19ad5dbf1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/9b179c0b277845919c7499e19ad5dbf1 2024-11-25T17:09:20,750 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/9b179c0b277845919c7499e19ad5dbf1, entries=150, sequenceid=520, filesize=12.0 K 2024-11-25T17:09:20,751 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/.tmp/C/cd8c6e260c4141fbbcbc9beab1b81876 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cd8c6e260c4141fbbcbc9beab1b81876 2024-11-25T17:09:20,754 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cd8c6e260c4141fbbcbc9beab1b81876, entries=150, sequenceid=520, filesize=12.0 K 2024-11-25T17:09:20,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-25T17:09:20,754 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 26bdcc7959673ac8abf209b84227d813 in 455ms, sequenceid=520, compaction requested=true 2024-11-25T17:09:20,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/c39421fcbd9e441998fc94cd7366f63a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/62ee8182335b4cd2af104575855a9a9f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1b6fac69135a441e935e55b9607158e2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/b5fbae94e51a47749469d6e4ae8550ad, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/d114e9c7b9c840b48840b67ae31596f9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/79c5c54f646545118f6e30b52fb2b46b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/34e469da643d4bd29c0c52d903cc8d9d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab88ff231184a498f4b1c75c39c9bf5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/13587035cdb44b8b82ab64e2d1a3df71, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/5386ab1681834bc6b1ce3a306de18e18, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6c00f41f10694df3a72ddfddf0b86ed7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/2fb6eaa0c6714c10865e525e4f0495fe, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1c6ff3f8a8784c86b931e31fad0bbd5a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/e6f9aaf336b34de2a3f9f4c33bdae342, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/676d31c89c164463b4edd3677b1f576c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/9f601d62fa9d4875a59ab65c68772f26, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0a2b8003d9654dadb44c8702024859cb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0bdf1c9a59f54f6aba9419af935590e0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4965ee50babd41a8a7783635b113350a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/46028e9c790c416ebe41b856cf75030f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/127a17e92ed94d6e8842e276579f4b18, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8e000e8e438c424f8cb4375ac013da08, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6a36279ee2cb488db2851eca21378544, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b3a57f9f84b400d95d26268e98d8330, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab78f2a057b4e66bffbf19a62265469, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/00060384e84d4f4aa2f2c7eda50d4183, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/ac247370ec3f43bd85983a7133c70a51, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b61b6a66c8744429a56041604c6ab79, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/cd53b69383c945cdb9caf91d01321fca, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/f2833defe9654370912c10add3329beb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/309888863d22411086f91aaf4280a578, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/51da79a7305c481eb0cb6e02e6e611f5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3bb4d388007f4b979b7b55ac0941e970, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/a28e7227c3f044c5bc46bc2aa3cc0660] to archive 2024-11-25T17:09:20,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:09:20,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/c39421fcbd9e441998fc94cd7366f63a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/c39421fcbd9e441998fc94cd7366f63a 2024-11-25T17:09:20,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/62ee8182335b4cd2af104575855a9a9f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/62ee8182335b4cd2af104575855a9a9f 2024-11-25T17:09:20,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1b6fac69135a441e935e55b9607158e2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1b6fac69135a441e935e55b9607158e2 2024-11-25T17:09:20,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/b5fbae94e51a47749469d6e4ae8550ad to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/b5fbae94e51a47749469d6e4ae8550ad 2024-11-25T17:09:20,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/d114e9c7b9c840b48840b67ae31596f9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/d114e9c7b9c840b48840b67ae31596f9 2024-11-25T17:09:20,761 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/79c5c54f646545118f6e30b52fb2b46b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/79c5c54f646545118f6e30b52fb2b46b 2024-11-25T17:09:20,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/34e469da643d4bd29c0c52d903cc8d9d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/34e469da643d4bd29c0c52d903cc8d9d 2024-11-25T17:09:20,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab88ff231184a498f4b1c75c39c9bf5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab88ff231184a498f4b1c75c39c9bf5 2024-11-25T17:09:20,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/13587035cdb44b8b82ab64e2d1a3df71 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/13587035cdb44b8b82ab64e2d1a3df71 2024-11-25T17:09:20,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/5386ab1681834bc6b1ce3a306de18e18 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/5386ab1681834bc6b1ce3a306de18e18 2024-11-25T17:09:20,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6c00f41f10694df3a72ddfddf0b86ed7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6c00f41f10694df3a72ddfddf0b86ed7 2024-11-25T17:09:20,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/2fb6eaa0c6714c10865e525e4f0495fe to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/2fb6eaa0c6714c10865e525e4f0495fe 2024-11-25T17:09:20,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1c6ff3f8a8784c86b931e31fad0bbd5a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/1c6ff3f8a8784c86b931e31fad0bbd5a 2024-11-25T17:09:20,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/e6f9aaf336b34de2a3f9f4c33bdae342 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/e6f9aaf336b34de2a3f9f4c33bdae342 2024-11-25T17:09:20,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/676d31c89c164463b4edd3677b1f576c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/676d31c89c164463b4edd3677b1f576c 2024-11-25T17:09:20,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/9f601d62fa9d4875a59ab65c68772f26 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/9f601d62fa9d4875a59ab65c68772f26 2024-11-25T17:09:20,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0a2b8003d9654dadb44c8702024859cb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0a2b8003d9654dadb44c8702024859cb 2024-11-25T17:09:20,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0bdf1c9a59f54f6aba9419af935590e0 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/0bdf1c9a59f54f6aba9419af935590e0 2024-11-25T17:09:20,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4965ee50babd41a8a7783635b113350a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4965ee50babd41a8a7783635b113350a 2024-11-25T17:09:20,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/46028e9c790c416ebe41b856cf75030f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/46028e9c790c416ebe41b856cf75030f 2024-11-25T17:09:20,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/127a17e92ed94d6e8842e276579f4b18 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/127a17e92ed94d6e8842e276579f4b18 2024-11-25T17:09:20,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8e000e8e438c424f8cb4375ac013da08 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8e000e8e438c424f8cb4375ac013da08 2024-11-25T17:09:20,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6a36279ee2cb488db2851eca21378544 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/6a36279ee2cb488db2851eca21378544 2024-11-25T17:09:20,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b3a57f9f84b400d95d26268e98d8330 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b3a57f9f84b400d95d26268e98d8330 2024-11-25T17:09:20,779 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab78f2a057b4e66bffbf19a62265469 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3ab78f2a057b4e66bffbf19a62265469 2024-11-25T17:09:20,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/00060384e84d4f4aa2f2c7eda50d4183 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/00060384e84d4f4aa2f2c7eda50d4183 2024-11-25T17:09:20,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/ac247370ec3f43bd85983a7133c70a51 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/ac247370ec3f43bd85983a7133c70a51 2024-11-25T17:09:20,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b61b6a66c8744429a56041604c6ab79 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/4b61b6a66c8744429a56041604c6ab79 2024-11-25T17:09:20,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/cd53b69383c945cdb9caf91d01321fca to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/cd53b69383c945cdb9caf91d01321fca 2024-11-25T17:09:20,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/f2833defe9654370912c10add3329beb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/f2833defe9654370912c10add3329beb 2024-11-25T17:09:20,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/309888863d22411086f91aaf4280a578 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/309888863d22411086f91aaf4280a578 2024-11-25T17:09:20,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/51da79a7305c481eb0cb6e02e6e611f5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/51da79a7305c481eb0cb6e02e6e611f5 2024-11-25T17:09:20,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3bb4d388007f4b979b7b55ac0941e970 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/3bb4d388007f4b979b7b55ac0941e970 2024-11-25T17:09:20,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/a28e7227c3f044c5bc46bc2aa3cc0660 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/a28e7227c3f044c5bc46bc2aa3cc0660 2024-11-25T17:09:20,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/982138270feb439d8b670e8820f70b70, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/38da297267754c15b98cbdc9e07ac37d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/ed7434604c924bfdb0e3d60c5ccbe2b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/75bfe25871474675a4f9481316d9a0ea, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/2e10103dbade4fa6907aa2301f975daf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8d7abe798d6745d1acc12db6cc0de79f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/dfe60920977d412a92c274de6b4d63eb, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6438fec3435c44efbdbb17b22d740c56, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/eadce6ea6bc54e569040d61e114720bf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/68a2f54e53264b0cb3277eb48189e4d2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1d213c70416342dea03efcacb6dae485, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/07d0200ffd914364ae62afab7ab6f7c9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/386b6cf7f70c405290d4c40ecc8c7519, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/76d158b1fcb44df0ae2050a647915649, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/b09a8d2702c34939a53e2efcb1c23455, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/7a512df6441843709dbd71d5f7102dfa, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/62308cc69af447fd817241f2332bd2d4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/08883ad13f1340bab9c01dcaa8516971, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1b32c2c33d4c49688d8688f8254c5085, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6471f0f011fc4b5e92de5f6118c44ade, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3a08b143bb834c7faa07b99cf9d31dbf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/c8c0751bd37446db94b35a1059f79c1e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3239456c0e1444ffbb9ce1ff627a9f7a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/cd9abfb2353b40ac8d166fa94f5fe807, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6a0339c591004201a6f0853e4dc8abb4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5b70caf07dd9492c97a3f38b925aed4b, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0b9bb4928bdb443ab926456eedea564a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8cf894a7adbd4af5a827a9c4cdc46576, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0818d59ab963425b83d00de3d27dc05c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/da8c0edb7f724a93a11a30587c2cd708, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/180b5e9dfd24402f928fd606ecc0b1b8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5ba6612a50d448ccbfb2c5908575d471, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fbe45a903acc41ddaf09a5deed31fa14, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/82db3eea1e3e4f94a330937df1b8f256] to archive 2024-11-25T17:09:20,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:09:20,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/982138270feb439d8b670e8820f70b70 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/982138270feb439d8b670e8820f70b70 2024-11-25T17:09:20,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/38da297267754c15b98cbdc9e07ac37d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/38da297267754c15b98cbdc9e07ac37d 2024-11-25T17:09:20,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/ed7434604c924bfdb0e3d60c5ccbe2b7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/ed7434604c924bfdb0e3d60c5ccbe2b7 2024-11-25T17:09:20,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/75bfe25871474675a4f9481316d9a0ea to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/75bfe25871474675a4f9481316d9a0ea 2024-11-25T17:09:20,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/2e10103dbade4fa6907aa2301f975daf to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/2e10103dbade4fa6907aa2301f975daf 2024-11-25T17:09:20,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8d7abe798d6745d1acc12db6cc0de79f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8d7abe798d6745d1acc12db6cc0de79f 2024-11-25T17:09:20,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/dfe60920977d412a92c274de6b4d63eb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/dfe60920977d412a92c274de6b4d63eb 2024-11-25T17:09:20,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6438fec3435c44efbdbb17b22d740c56 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6438fec3435c44efbdbb17b22d740c56 2024-11-25T17:09:20,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/eadce6ea6bc54e569040d61e114720bf to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/eadce6ea6bc54e569040d61e114720bf 2024-11-25T17:09:20,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/68a2f54e53264b0cb3277eb48189e4d2 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/68a2f54e53264b0cb3277eb48189e4d2 2024-11-25T17:09:20,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1d213c70416342dea03efcacb6dae485 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1d213c70416342dea03efcacb6dae485 2024-11-25T17:09:20,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/07d0200ffd914364ae62afab7ab6f7c9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/07d0200ffd914364ae62afab7ab6f7c9 2024-11-25T17:09:20,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/386b6cf7f70c405290d4c40ecc8c7519 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/386b6cf7f70c405290d4c40ecc8c7519 2024-11-25T17:09:20,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/76d158b1fcb44df0ae2050a647915649 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/76d158b1fcb44df0ae2050a647915649 2024-11-25T17:09:20,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/b09a8d2702c34939a53e2efcb1c23455 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/b09a8d2702c34939a53e2efcb1c23455 2024-11-25T17:09:20,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/7a512df6441843709dbd71d5f7102dfa to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/7a512df6441843709dbd71d5f7102dfa 2024-11-25T17:09:20,814 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/62308cc69af447fd817241f2332bd2d4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/62308cc69af447fd817241f2332bd2d4 2024-11-25T17:09:20,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/08883ad13f1340bab9c01dcaa8516971 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/08883ad13f1340bab9c01dcaa8516971 2024-11-25T17:09:20,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1b32c2c33d4c49688d8688f8254c5085 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/1b32c2c33d4c49688d8688f8254c5085 2024-11-25T17:09:20,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6471f0f011fc4b5e92de5f6118c44ade to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6471f0f011fc4b5e92de5f6118c44ade 2024-11-25T17:09:20,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3a08b143bb834c7faa07b99cf9d31dbf to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3a08b143bb834c7faa07b99cf9d31dbf 2024-11-25T17:09:20,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/c8c0751bd37446db94b35a1059f79c1e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/c8c0751bd37446db94b35a1059f79c1e 2024-11-25T17:09:20,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3239456c0e1444ffbb9ce1ff627a9f7a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/3239456c0e1444ffbb9ce1ff627a9f7a 2024-11-25T17:09:20,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/cd9abfb2353b40ac8d166fa94f5fe807 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/cd9abfb2353b40ac8d166fa94f5fe807 2024-11-25T17:09:20,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6a0339c591004201a6f0853e4dc8abb4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/6a0339c591004201a6f0853e4dc8abb4 2024-11-25T17:09:20,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5b70caf07dd9492c97a3f38b925aed4b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5b70caf07dd9492c97a3f38b925aed4b 2024-11-25T17:09:20,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0b9bb4928bdb443ab926456eedea564a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0b9bb4928bdb443ab926456eedea564a 2024-11-25T17:09:20,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8cf894a7adbd4af5a827a9c4cdc46576 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/8cf894a7adbd4af5a827a9c4cdc46576 2024-11-25T17:09:20,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0818d59ab963425b83d00de3d27dc05c to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0818d59ab963425b83d00de3d27dc05c 2024-11-25T17:09:20,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/da8c0edb7f724a93a11a30587c2cd708 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/da8c0edb7f724a93a11a30587c2cd708 2024-11-25T17:09:20,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/180b5e9dfd24402f928fd606ecc0b1b8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/180b5e9dfd24402f928fd606ecc0b1b8 2024-11-25T17:09:20,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5ba6612a50d448ccbfb2c5908575d471 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/5ba6612a50d448ccbfb2c5908575d471 2024-11-25T17:09:20,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fbe45a903acc41ddaf09a5deed31fa14 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fbe45a903acc41ddaf09a5deed31fa14 2024-11-25T17:09:20,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/82db3eea1e3e4f94a330937df1b8f256 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/82db3eea1e3e4f94a330937df1b8f256 2024-11-25T17:09:20,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3fef88347fa84100a4b4f009b7067376, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5609f2c860ba4980a490458cfc3e4d07, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c8332d5bd00049a68f8161dcac5d752e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/19989c5b70a848c2a9d3abf60f2cf6b6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/ecf8c24509c342abb3c4ccef6408c97e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/66c8eb88b63842adb7c371ab20722732, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d1aec28f3bb64ba895cfbd4f9dac09f2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b88c02def92f43a1852f5eb1258aeb75, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3bcab74e0ad4453e8a6f9ef10bfab143, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/877e2a4f130447579b6b101166d953a9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/38311d35ec7b4e40af605d626b2bc105, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3a8f379f773143dca8b201f7303b7ae2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c5e63a0118cb4b659f90dbbb5dc0517a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/20e96d8407e54ef9bda55ff652e8e99f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b9c167fd5f574b039bc6c883d2981ad0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c3da0afd5bd4479ebff79ab6e8eed65e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/75e3d210f6a84fcf935d50cc6e2740e9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/29ed1a5835814f04839c3541c7eb84f9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/13d6883709504e0b84fdfbafdc165ac2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/4cc73c7f7a7b4b31927dd0b5328ec5c0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/51ca08ad167547689d77f40b93d0f3df, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f0bbda05a30b413695f1666557c8e9d5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/2f851c6a22a44d5ba89341f39bec031f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3225429522644d8580722eca99f2ee0a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cece873f94e948df9fed4025591919c4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a80493a9d10a41cfaf401fe3271951a3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a850fe2cc1044d5198e539e0e81357f5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/eb7a0a198af5454e939f4a7ad4bede05, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5af914333a5b4be09008666ad5bab2af, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f723f4fb68974d7f82124ecea5f0b71a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d6cdf6321f664397a9e44a05f7708e14, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/313e9aa000ee43219d4da2cd41845dc0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/6a28f416e3aa4abe89b8164451b2e07b] to archive 2024-11-25T17:09:20,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T17:09:20,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3fef88347fa84100a4b4f009b7067376 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3fef88347fa84100a4b4f009b7067376 2024-11-25T17:09:20,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5609f2c860ba4980a490458cfc3e4d07 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5609f2c860ba4980a490458cfc3e4d07 2024-11-25T17:09:20,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c8332d5bd00049a68f8161dcac5d752e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c8332d5bd00049a68f8161dcac5d752e 2024-11-25T17:09:20,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/19989c5b70a848c2a9d3abf60f2cf6b6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/19989c5b70a848c2a9d3abf60f2cf6b6 2024-11-25T17:09:20,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/ecf8c24509c342abb3c4ccef6408c97e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/ecf8c24509c342abb3c4ccef6408c97e 2024-11-25T17:09:20,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/66c8eb88b63842adb7c371ab20722732 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/66c8eb88b63842adb7c371ab20722732 2024-11-25T17:09:20,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d1aec28f3bb64ba895cfbd4f9dac09f2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d1aec28f3bb64ba895cfbd4f9dac09f2 2024-11-25T17:09:20,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b88c02def92f43a1852f5eb1258aeb75 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b88c02def92f43a1852f5eb1258aeb75 2024-11-25T17:09:20,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3bcab74e0ad4453e8a6f9ef10bfab143 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3bcab74e0ad4453e8a6f9ef10bfab143 2024-11-25T17:09:20,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/877e2a4f130447579b6b101166d953a9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/877e2a4f130447579b6b101166d953a9 2024-11-25T17:09:20,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/38311d35ec7b4e40af605d626b2bc105 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/38311d35ec7b4e40af605d626b2bc105 2024-11-25T17:09:20,843 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3a8f379f773143dca8b201f7303b7ae2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3a8f379f773143dca8b201f7303b7ae2 2024-11-25T17:09:20,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c5e63a0118cb4b659f90dbbb5dc0517a to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c5e63a0118cb4b659f90dbbb5dc0517a 2024-11-25T17:09:20,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/20e96d8407e54ef9bda55ff652e8e99f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/20e96d8407e54ef9bda55ff652e8e99f 2024-11-25T17:09:20,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b9c167fd5f574b039bc6c883d2981ad0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/b9c167fd5f574b039bc6c883d2981ad0 2024-11-25T17:09:20,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c3da0afd5bd4479ebff79ab6e8eed65e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/c3da0afd5bd4479ebff79ab6e8eed65e 2024-11-25T17:09:20,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/75e3d210f6a84fcf935d50cc6e2740e9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/75e3d210f6a84fcf935d50cc6e2740e9 2024-11-25T17:09:20,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/29ed1a5835814f04839c3541c7eb84f9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/29ed1a5835814f04839c3541c7eb84f9 2024-11-25T17:09:20,853 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/13d6883709504e0b84fdfbafdc165ac2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/13d6883709504e0b84fdfbafdc165ac2 2024-11-25T17:09:20,854 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/4cc73c7f7a7b4b31927dd0b5328ec5c0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/4cc73c7f7a7b4b31927dd0b5328ec5c0 2024-11-25T17:09:20,861 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/51ca08ad167547689d77f40b93d0f3df to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/51ca08ad167547689d77f40b93d0f3df 2024-11-25T17:09:20,862 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f0bbda05a30b413695f1666557c8e9d5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f0bbda05a30b413695f1666557c8e9d5 2024-11-25T17:09:20,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/2f851c6a22a44d5ba89341f39bec031f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/2f851c6a22a44d5ba89341f39bec031f 2024-11-25T17:09:20,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3225429522644d8580722eca99f2ee0a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/3225429522644d8580722eca99f2ee0a 2024-11-25T17:09:20,865 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cece873f94e948df9fed4025591919c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cece873f94e948df9fed4025591919c4 2024-11-25T17:09:20,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a80493a9d10a41cfaf401fe3271951a3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a80493a9d10a41cfaf401fe3271951a3 2024-11-25T17:09:20,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a850fe2cc1044d5198e539e0e81357f5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/a850fe2cc1044d5198e539e0e81357f5 2024-11-25T17:09:20,867 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/eb7a0a198af5454e939f4a7ad4bede05 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/eb7a0a198af5454e939f4a7ad4bede05 2024-11-25T17:09:20,868 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5af914333a5b4be09008666ad5bab2af to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/5af914333a5b4be09008666ad5bab2af 2024-11-25T17:09:20,869 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f723f4fb68974d7f82124ecea5f0b71a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/f723f4fb68974d7f82124ecea5f0b71a 2024-11-25T17:09:20,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d6cdf6321f664397a9e44a05f7708e14 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/d6cdf6321f664397a9e44a05f7708e14 2024-11-25T17:09:20,871 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/313e9aa000ee43219d4da2cd41845dc0 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/313e9aa000ee43219d4da2cd41845dc0 2024-11-25T17:09:20,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/6a28f416e3aa4abe89b8164451b2e07b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/6a28f416e3aa4abe89b8164451b2e07b 2024-11-25T17:09:20,876 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/recovered.edits/523.seqid, newMaxSeqId=523, maxSeqId=1 2024-11-25T17:09:20,877 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813. 2024-11-25T17:09:20,877 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 26bdcc7959673ac8abf209b84227d813: 2024-11-25T17:09:20,878 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:20,879 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=26bdcc7959673ac8abf209b84227d813, regionState=CLOSED 2024-11-25T17:09:20,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-25T17:09:20,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 26bdcc7959673ac8abf209b84227d813, server=6579369734b6,41865,1732554474464 in 732 msec 2024-11-25T17:09:20,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-25T17:09:20,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=26bdcc7959673ac8abf209b84227d813, UNASSIGN in 736 msec 2024-11-25T17:09:20,883 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-25T17:09:20,883 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 738 msec 2024-11-25T17:09:20,884 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554560884"}]},"ts":"1732554560884"} 2024-11-25T17:09:20,885 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-25T17:09:20,887 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-25T17:09:20,888 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, 
state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 749 msec 2024-11-25T17:09:21,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-25T17:09:21,255 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-25T17:09:21,255 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-25T17:09:21,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:21,257 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:21,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-25T17:09:21,257 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:21,259 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:21,261 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/recovered.edits] 2024-11-25T17:09:21,263 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/04650d5f184f459ba80c981f3c7643a1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/04650d5f184f459ba80c981f3c7643a1 2024-11-25T17:09:21,264 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/353921ee2e9a4322929cad161908a4b1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/353921ee2e9a4322929cad161908a4b1 2024-11-25T17:09:21,265 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8f668fca3a374fd19231ec0ec15c10b7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/A/8f668fca3a374fd19231ec0ec15c10b7 2024-11-25T17:09:21,267 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0c61ce85d1184088a3842bbde3883977 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/0c61ce85d1184088a3842bbde3883977 2024-11-25T17:09:21,268 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/9b179c0b277845919c7499e19ad5dbf1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/9b179c0b277845919c7499e19ad5dbf1 2024-11-25T17:09:21,268 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fa7a8e20e5f64d85a11aef38ba4f187f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/B/fa7a8e20e5f64d85a11aef38ba4f187f 2024-11-25T17:09:21,270 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/45c1fe346e8e415eb36d1c7aa9a55545 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/45c1fe346e8e415eb36d1c7aa9a55545 2024-11-25T17:09:21,271 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/9270067ab5dc46c0927f5ed9b834d795 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/9270067ab5dc46c0927f5ed9b834d795 2024-11-25T17:09:21,272 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cd8c6e260c4141fbbcbc9beab1b81876 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/C/cd8c6e260c4141fbbcbc9beab1b81876 2024-11-25T17:09:21,275 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/recovered.edits/523.seqid 
to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813/recovered.edits/523.seqid 2024-11-25T17:09:21,275 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/26bdcc7959673ac8abf209b84227d813 2024-11-25T17:09:21,275 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-25T17:09:21,277 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:21,281 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-25T17:09:21,283 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-25T17:09:21,284 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:21,284 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-25T17:09:21,284 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732554561284"}]},"ts":"9223372036854775807"} 2024-11-25T17:09:21,285 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-25T17:09:21,285 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 26bdcc7959673ac8abf209b84227d813, NAME => 'TestAcidGuarantees,,1732554535032.26bdcc7959673ac8abf209b84227d813.', STARTKEY => '', ENDKEY => ''}] 2024-11-25T17:09:21,286 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-25T17:09:21,286 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732554561286"}]},"ts":"9223372036854775807"} 2024-11-25T17:09:21,287 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-25T17:09:21,289 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:21,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 34 msec 2024-11-25T17:09:21,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-25T17:09:21,358 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-25T17:09:21,369 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=239 (was 242), OpenFileDescriptor=451 (was 463), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=708 (was 641) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2232 (was 2546) 2024-11-25T17:09:21,379 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=708, ProcessCount=11, AvailableMemoryMB=2231 2024-11-25T17:09:21,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-25T17:09:21,381 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:09:21,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:21,382 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T17:09:21,382 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:21,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-11-25T17:09:21,383 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T17:09:21,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-25T17:09:21,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742167_1343 (size=963) 2024-11-25T17:09:21,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-25T17:09:21,685 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-25T17:09:21,795 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:09:21,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742168_1344 (size=53) 2024-11-25T17:09:21,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-25T17:09:22,200 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:09:22,201 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 59409bf208d66df7ccc7026d9c7a73c4, disabling compactions & flushes 2024-11-25T17:09:22,201 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:22,201 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:22,201 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. after waiting 0 ms 2024-11-25T17:09:22,201 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:22,201 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:22,201 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:22,202 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T17:09:22,202 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732554562202"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732554562202"}]},"ts":"1732554562202"} 2024-11-25T17:09:22,203 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-25T17:09:22,203 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T17:09:22,204 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554562203"}]},"ts":"1732554562203"} 2024-11-25T17:09:22,204 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-25T17:09:22,208 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, ASSIGN}] 2024-11-25T17:09:22,209 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, ASSIGN 2024-11-25T17:09:22,209 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, ASSIGN; state=OFFLINE, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=false 2024-11-25T17:09:22,360 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:22,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:09:22,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-25T17:09:22,512 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:22,515 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:22,515 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:09:22,515 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,515 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:09:22,515 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,515 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,516 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,517 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:22,518 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59409bf208d66df7ccc7026d9c7a73c4 columnFamilyName A 2024-11-25T17:09:22,518 DEBUG [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:22,518 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(327): Store=59409bf208d66df7ccc7026d9c7a73c4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:22,518 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,519 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:22,519 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59409bf208d66df7ccc7026d9c7a73c4 columnFamilyName B 2024-11-25T17:09:22,519 DEBUG [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:22,519 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(327): Store=59409bf208d66df7ccc7026d9c7a73c4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:22,520 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,520 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:22,520 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59409bf208d66df7ccc7026d9c7a73c4 columnFamilyName C 2024-11-25T17:09:22,521 DEBUG [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:22,521 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(327): Store=59409bf208d66df7ccc7026d9c7a73c4/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:22,521 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:22,522 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,522 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,523 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:09:22,524 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:22,525 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:09:22,526 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 59409bf208d66df7ccc7026d9c7a73c4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74525045, jitterRate=0.1105097085237503}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:09:22,526 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:22,527 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., pid=100, masterSystemTime=1732554562512 2024-11-25T17:09:22,528 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:22,528 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:22,529 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:22,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-25T17:09:22,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 in 168 msec 2024-11-25T17:09:22,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-25T17:09:22,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, ASSIGN in 322 msec 2024-11-25T17:09:22,532 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T17:09:22,532 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554562532"}]},"ts":"1732554562532"} 2024-11-25T17:09:22,533 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-25T17:09:22,535 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T17:09:22,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1540 sec 2024-11-25T17:09:22,624 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
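Note on the create that just completed (pid=98): the logged request builds TestAcidGuarantees with the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three column families A, B, C with VERSIONS => '1'. A minimal client-side sketch of an equivalent create call is shown here, assuming the stock HBase 2.x Admin/TableDescriptorBuilder API; this is an illustration only, not the code the test harness itself runs.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table attribute from the logged create request: every store uses the
          // adaptive in-memory compacting memstore (compactor=ADAPTIVE above).
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
            .build());
      }
      admin.createTable(table.build());
    }
  }
}

The same policy can also be requested per column family via ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE); the test uses the table-level attribute, which is why each of the three stores opened above logs compactor=ADAPTIVE with pipelineThreshold=2.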
2024-11-25T17:09:23,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-25T17:09:23,487 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-25T17:09:23,488 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68ad882f to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f5b2180 2024-11-25T17:09:23,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34becda3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:23,494 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:23,495 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:23,496 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T17:09:23,496 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T17:09:23,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
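Note on the TableDescriptorChecker warning just above: MEMSTORE_FLUSHSIZE = 131072 (128 KB) does not only cause frequent flushing, it also lowers the per-region blocking threshold. With the default hbase.hregion.memstore.block.multiplier of 4 (the HBase 2.x default), updates are rejected once a region's memstore reaches 4 × 128 KB = 512 KB, which matches the "Over memstore limit=512.0 K" RegionTooBusyException thrown against region 59409bf208d66df7ccc7026d9c7a73c4 near the end of this log. A small sketch of the arithmetic, assuming the standard configuration keys and defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Table-level MEMSTORE_FLUSHSIZE from the descriptor above (128 KB).
    long flushSize = 131072L;
    // hbase.hregion.memstore.block.multiplier defaults to 4 in HBase 2.x.
    int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 131072 * 4 = 524288 bytes = 512 KB: the "Over memstore limit" threshold
    // reported by HRegion.checkResources() later in this log.
    System.out.println("blocking limit = " + (flushSize * blockMultiplier) + " bytes");
  }
}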
2024-11-25T17:09:23,498 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:09:23,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:23,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742169_1345 (size=999) 2024-11-25T17:09:23,915 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-25T17:09:23,915 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-25T17:09:23,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-25T17:09:23,919 INFO 
[PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, REOPEN/MOVE}] 2024-11-25T17:09:23,920 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, REOPEN/MOVE 2024-11-25T17:09:23,921 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:23,922 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:09:23,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:09:24,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:24,074 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,074 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:09:24,074 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 59409bf208d66df7ccc7026d9c7a73c4, disabling compactions & flushes 2024-11-25T17:09:24,074 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,074 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,074 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. after waiting 0 ms 2024-11-25T17:09:24,074 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,078 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-25T17:09:24,079 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
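Note on the close above: it is the first half of the ReopenTableRegionsProcedure spawned by ModifyTableProcedure pid=101, which rewrites the descriptor so that column family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'); the reopen below picks up the new tableinfo. A rough sketch of an equivalent Admin.modifyTable call, again assuming the standard HBase 2.x client API rather than the test's own helper code:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(table);
      // Rebuild family 'A' with MOB enabled and a 4-byte threshold, as in the
      // modify request logged above; families B and C are left untouched.
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)
              .setMobThreshold(4L)
              .build())
          .build();
      // modifyTable triggers the region close/reopen seen in this part of the log.
      admin.modifyTable(modified);
    }
  }
}

With a 4-byte threshold, any value longer than 4 bytes written to family A is stored as a MOB cell, so effectively all of the test's writes to A go through the MOB path exercised by testMobScanAtomicity.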
2024-11-25T17:09:24,079 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:24,079 WARN [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 59409bf208d66df7ccc7026d9c7a73c4 to self. 2024-11-25T17:09:24,081 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=CLOSED 2024-11-25T17:09:24,081 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,083 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-25T17:09:24,084 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 in 160 msec 2024-11-25T17:09:24,087 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, REOPEN/MOVE; state=CLOSED, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=true 2024-11-25T17:09:24,193 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-25T17:09:24,237 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:09:24,390 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:24,393 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:24,393 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:09:24,394 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,394 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:09:24,394 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,394 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,396 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,397 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:24,398 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59409bf208d66df7ccc7026d9c7a73c4 columnFamilyName A 2024-11-25T17:09:24,402 DEBUG [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:24,402 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(327): Store=59409bf208d66df7ccc7026d9c7a73c4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:24,403 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,403 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:24,404 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59409bf208d66df7ccc7026d9c7a73c4 columnFamilyName B 2024-11-25T17:09:24,404 DEBUG [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:24,404 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(327): Store=59409bf208d66df7ccc7026d9c7a73c4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:24,404 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,405 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:24,405 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59409bf208d66df7ccc7026d9c7a73c4 columnFamilyName C 2024-11-25T17:09:24,405 DEBUG [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:24,405 INFO [StoreOpener-59409bf208d66df7ccc7026d9c7a73c4-1 {}] regionserver.HStore(327): Store=59409bf208d66df7ccc7026d9c7a73c4/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:24,406 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,406 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,407 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,408 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:09:24,409 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,413 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 59409bf208d66df7ccc7026d9c7a73c4; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59329710, jitterRate=-0.11591842770576477}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:09:24,414 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:24,415 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., pid=105, masterSystemTime=1732554564390 2024-11-25T17:09:24,417 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,417 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:24,417 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=OPEN, openSeqNum=5, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-11-25T17:09:24,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 in 180 msec 2024-11-25T17:09:24,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-25T17:09:24,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, REOPEN/MOVE in 500 msec 2024-11-25T17:09:24,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-25T17:09:24,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 504 msec 2024-11-25T17:09:24,425 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 926 msec 2024-11-25T17:09:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-25T17:09:24,427 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-11-25T17:09:24,436 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,437 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-11-25T17:09:24,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,441 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-11-25T17:09:24,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,445 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x7ec15031 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-11-25T17:09:24,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,451 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-11-25T17:09:24,454 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,455 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-11-25T17:09:24,458 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,459 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-11-25T17:09:24,462 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,463 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-11-25T17:09:24,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,467 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-11-25T17:09:24,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,471 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-11-25T17:09:24,474 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:24,477 DEBUG [hconnection-0x12270ebb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,478 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:24,479 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-25T17:09:24,480 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-25T17:09:24,480 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:24,481 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:24,481 DEBUG [hconnection-0x67073526-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,482 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,485 DEBUG [hconnection-0x43a5bf7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,486 DEBUG [hconnection-0x5b7d5ac4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,486 DEBUG [hconnection-0x31bef7b7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,487 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,487 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57082, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,487 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57086, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:24,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:24,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:24,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:24,495 DEBUG [hconnection-0x2f8eebd9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,495 DEBUG [hconnection-0x18825739-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,496 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57104, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,496 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,509 DEBUG [hconnection-0x3893b784-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,510 DEBUG [hconnection-0x3af42210-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,510 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,511 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,512 DEBUG [hconnection-0x1cbe5ed1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:24,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554624520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554624520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,521 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:24,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554624521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554624522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554624523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ab909632f6bc44358218679b2b1459f5_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554564492/Put/seqid=0 2024-11-25T17:09:24,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742170_1346 (size=9714) 2024-11-25T17:09:24,573 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:24,579 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ab909632f6bc44358218679b2b1459f5_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ab909632f6bc44358218679b2b1459f5_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:24,580 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/2c6a07e6d8734ece8ff614937b946d24, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:24,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/2c6a07e6d8734ece8ff614937b946d24 is 175, key is test_row_0/A:col10/1732554564492/Put/seqid=0 2024-11-25T17:09:24,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-25T17:09:24,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554624621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554624622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554624623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554624623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554624624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,632 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:24,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:24,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:24,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:24,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:24,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742171_1347 (size=22361) 2024-11-25T17:09:24,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-25T17:09:24,785 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:24,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:24,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:24,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:24,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:24,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:24,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554624824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554624824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554624825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554624825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:24,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554624826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:24,937 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:24,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:24,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:24,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:24,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:24,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:24,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,036 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/2c6a07e6d8734ece8ff614937b946d24 2024-11-25T17:09:25,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/41047f472e724b388321ed62748895e4 is 50, key is test_row_0/B:col10/1732554564492/Put/seqid=0 2024-11-25T17:09:25,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-25T17:09:25,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:25,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742172_1348 (size=9657) 2024-11-25T17:09:25,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/41047f472e724b388321ed62748895e4 2024-11-25T17:09:25,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554625128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554625129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554625129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554625130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554625131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f1c6084ef7044e1c891c030e9d5a9112 is 50, key is test_row_0/C:col10/1732554564492/Put/seqid=0 2024-11-25T17:09:25,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742173_1349 (size=9657) 2024-11-25T17:09:25,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:25,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:25,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:25,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:25,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:25,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:25,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
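The pid=107 entries above show the master repeatedly dispatching a FlushRegionCallable to 6579369734b6,41865 and the region server declining it because the region is already flushing; the master records the remote failure and retries until the in-flight flush finishes. For context only (this is not part of the captured log), below is a minimal sketch of how such a table flush is requested through the public Admin API; the table name mirrors this test, everything else is an assumption.

// Illustrative sketch, not taken from this test run: asking the master to flush
// the test table. In this log the resulting flush procedure is rejected by the
// region server with "already flushing" and retried by the master.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush all regions of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}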
2024-11-25T17:09:25,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f1c6084ef7044e1c891c030e9d5a9112 2024-11-25T17:09:25,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:25,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:25,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:25,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
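The "Over memstore limit=512.0 K" rejections throughout this stretch come from HRegion.checkResources, which blocks writes once a region's memstore grows past its blocking threshold. That threshold is derived from the configured memstore flush size and the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier); a 512 K limit suggests a deliberately small flush size in this test, though the test's actual settings are not shown in this excerpt. A minimal configuration sketch with assumed illustrative values:

// Configuration sketch with assumed values (not the test's actual settings).
// The per-region blocking threshold is roughly flush size x block multiplier,
// so 128 KB x 4 would line up with the 512 K limit reported above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB, illustrative
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // commonly the default
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Approximate per-region blocking limit: " + blockingLimit + " bytes");
  }
}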
2024-11-25T17:09:25,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/2c6a07e6d8734ece8ff614937b946d24 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/2c6a07e6d8734ece8ff614937b946d24 2024-11-25T17:09:25,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/2c6a07e6d8734ece8ff614937b946d24, entries=100, sequenceid=17, filesize=21.8 K 2024-11-25T17:09:25,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/41047f472e724b388321ed62748895e4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/41047f472e724b388321ed62748895e4 2024-11-25T17:09:25,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/41047f472e724b388321ed62748895e4, entries=100, sequenceid=17, filesize=9.4 K 2024-11-25T17:09:25,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f1c6084ef7044e1c891c030e9d5a9112 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f1c6084ef7044e1c891c030e9d5a9112 2024-11-25T17:09:25,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f1c6084ef7044e1c891c030e9d5a9112, entries=100, sequenceid=17, filesize=9.4 K 2024-11-25T17:09:25,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 59409bf208d66df7ccc7026d9c7a73c4 in 1080ms, sequenceid=17, compaction requested=false 2024-11-25T17:09:25,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-25T17:09:25,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:25,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-25T17:09:25,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:25,638 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:25,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:25,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:25,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:25,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:25,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554625644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554625644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411257f706515bf534a819c9dd85ee2c6c368_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554564514/Put/seqid=0 2024-11-25T17:09:25,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554625646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554625647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742174_1350 (size=17034) 2024-11-25T17:09:25,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554625649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,655 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:25,658 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411257f706515bf534a819c9dd85ee2c6c368_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411257f706515bf534a819c9dd85ee2c6c368_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:25,659 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/0c9eb5123683424d97c4dd8c85700a7b, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:25,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/0c9eb5123683424d97c4dd8c85700a7b is 175, key is test_row_0/A:col10/1732554564514/Put/seqid=0 2024-11-25T17:09:25,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742175_1351 (size=48139) 2024-11-25T17:09:25,669 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/0c9eb5123683424d97c4dd8c85700a7b 2024-11-25T17:09:25,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/bf57c70851af47e4bc3b9b3048a0ed2f is 50, key is test_row_0/B:col10/1732554564514/Put/seqid=0 2024-11-25T17:09:25,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742176_1352 
(size=12001) 2024-11-25T17:09:25,702 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:25,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:25,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:25,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,703 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
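Interleaved with the flush retries, the handlers on port 41865 keep rejecting client Mutate calls with RegionTooBusyException until the memstore drains. The HBase client normally absorbs these through its own retry policy, but as an illustrative aside (not taken from this test), here is a minimal sketch of a caller retrying a put explicitly; the table, row, and column names mirror this log, while the retry count and backoff are assumptions.

// Illustrative sketch only: retrying a put that may fail with RegionTooBusyException
// while the region is over its memstore blocking limit. In practice the client may
// also surface this wrapped in a retries-exhausted exception; kept simple here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int maxAttempts = 5;   // assumption for the example
      long backoffMs = 100;  // assumption for the example
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);    // may be rejected while the region is over its memstore limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= maxAttempts) {
            throw e;         // give up after the configured number of attempts
          }
          Thread.sleep(backoffMs * attempt); // simple linear backoff before retrying
        }
      }
    }
  }
}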
2024-11-25T17:09:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554625750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554625750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554625756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554625757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554625757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,855 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:25,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:25,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:25,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:25,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:25,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554625955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554625956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554625961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554625961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:25,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:25,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554625962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,007 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:26,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:26,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:26,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/bf57c70851af47e4bc3b9b3048a0ed2f 2024-11-25T17:09:26,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c2bfdc43c73a42a593bbd90de74c94d7 is 50, key is test_row_0/C:col10/1732554564514/Put/seqid=0 2024-11-25T17:09:26,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742177_1353 (size=12001) 2024-11-25T17:09:26,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:26,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:26,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
as already flushing 2024-11-25T17:09:26,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554626260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554626261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554626266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554626267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554626268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,312 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:26,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:26,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:26,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,413 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T17:09:26,464 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:26,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-25T17:09:26,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:26,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:26,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-25T17:09:26,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c2bfdc43c73a42a593bbd90de74c94d7
2024-11-25T17:09:26,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/0c9eb5123683424d97c4dd8c85700a7b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0c9eb5123683424d97c4dd8c85700a7b
2024-11-25T17:09:26,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0c9eb5123683424d97c4dd8c85700a7b, entries=250, sequenceid=42, filesize=47.0 K
2024-11-25T17:09:26,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/bf57c70851af47e4bc3b9b3048a0ed2f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/bf57c70851af47e4bc3b9b3048a0ed2f
2024-11-25T17:09:26,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/bf57c70851af47e4bc3b9b3048a0ed2f, entries=150, sequenceid=42, filesize=11.7 K
2024-11-25T17:09:26,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c2bfdc43c73a42a593bbd90de74c94d7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c2bfdc43c73a42a593bbd90de74c94d7
2024-11-25T17:09:26,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c2bfdc43c73a42a593bbd90de74c94d7, entries=150, sequenceid=42, filesize=11.7 K
2024-11-25T17:09:26,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 59409bf208d66df7ccc7026d9c7a73c4 in 893ms, sequenceid=42, compaction requested=false
2024-11-25T17:09:26,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4:
2024-11-25T17:09:26,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-25T17:09:26,616 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464
2024-11-25T17:09:26,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-25T17:09:26,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.
2024-11-25T17:09:26,617 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:26,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:26,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112529f6b22bd76444d4916b89efcd0f4e74_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554565648/Put/seqid=0 2024-11-25T17:09:26,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742178_1354 (size=12154) 2024-11-25T17:09:26,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:26,665 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112529f6b22bd76444d4916b89efcd0f4e74_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112529f6b22bd76444d4916b89efcd0f4e74_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:26,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/45583ed1941c4038a4f91629b50f850a, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:26,666 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/45583ed1941c4038a4f91629b50f850a is 175, key is test_row_0/A:col10/1732554565648/Put/seqid=0 2024-11-25T17:09:26,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742179_1355 (size=30955) 2024-11-25T17:09:26,688 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/45583ed1941c4038a4f91629b50f850a 2024-11-25T17:09:26,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/cd4b080922df40eca9f0ddebbf5dc375 is 50, key is test_row_0/B:col10/1732554565648/Put/seqid=0 2024-11-25T17:09:26,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742180_1356 (size=12001) 2024-11-25T17:09:26,736 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/cd4b080922df40eca9f0ddebbf5dc375 2024-11-25T17:09:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/cd618b4f25434ffbac8d04fdd236b451 is 50, key is test_row_0/C:col10/1732554565648/Put/seqid=0 2024-11-25T17:09:26,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742181_1357 (size=12001) 2024-11-25T17:09:26,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
as already flushing 2024-11-25T17:09:26,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:26,773 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/cd618b4f25434ffbac8d04fdd236b451 2024-11-25T17:09:26,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/45583ed1941c4038a4f91629b50f850a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/45583ed1941c4038a4f91629b50f850a 2024-11-25T17:09:26,781 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/45583ed1941c4038a4f91629b50f850a, entries=150, sequenceid=53, filesize=30.2 K 2024-11-25T17:09:26,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/cd4b080922df40eca9f0ddebbf5dc375 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/cd4b080922df40eca9f0ddebbf5dc375 2024-11-25T17:09:26,792 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/cd4b080922df40eca9f0ddebbf5dc375, entries=150, sequenceid=53, filesize=11.7 K 2024-11-25T17:09:26,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/cd618b4f25434ffbac8d04fdd236b451 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/cd618b4f25434ffbac8d04fdd236b451 2024-11-25T17:09:26,799 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/cd618b4f25434ffbac8d04fdd236b451, entries=150, sequenceid=53, filesize=11.7 K 2024-11-25T17:09:26,800 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=67.09 KB/68700 for 59409bf208d66df7ccc7026d9c7a73c4 in 183ms, sequenceid=53, compaction requested=true 2024-11-25T17:09:26,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:26,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:26,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-25T17:09:26,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:26,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-25T17:09:26,804 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-25T17:09:26,804 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3220 sec 2024-11-25T17:09:26,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-25T17:09:26,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:26,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:26,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:26,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:26,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:26,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:26,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.3260 sec 2024-11-25T17:09:26,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ea1c7bb8f798448280cd453683744753_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554566790/Put/seqid=0 2024-11-25T17:09:26,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742182_1358 (size=14594) 2024-11-25T17:09:26,835 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:26,839 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ea1c7bb8f798448280cd453683744753_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ea1c7bb8f798448280cd453683744753_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:26,840 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f3a5b2944ba04d599f9a82125787c015, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:26,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f3a5b2944ba04d599f9a82125787c015 is 175, key is test_row_0/A:col10/1732554566790/Put/seqid=0 2024-11-25T17:09:26,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554626841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554626842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554626842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554626844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554626845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742183_1359 (size=39549) 2024-11-25T17:09:26,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554626952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554626953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554626955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554626955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:26,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:26,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554626958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554627157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554627159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554627159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554627160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554627168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,273 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=68, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f3a5b2944ba04d599f9a82125787c015 2024-11-25T17:09:27,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/8519a0d6e9164fedad891329d582b6e1 is 50, key is test_row_0/B:col10/1732554566790/Put/seqid=0 2024-11-25T17:09:27,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742184_1360 (size=12001) 2024-11-25T17:09:27,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554627466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554627467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554627467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554627468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554627473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,715 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=68 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/8519a0d6e9164fedad891329d582b6e1 2024-11-25T17:09:27,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/0affec2130284068a2c9563c2d369224 is 50, key is test_row_0/C:col10/1732554566790/Put/seqid=0 2024-11-25T17:09:27,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742185_1361 (size=12001) 2024-11-25T17:09:27,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554627974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554627976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554627977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554627977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:27,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554627979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:28,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=68 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/0affec2130284068a2c9563c2d369224 2024-11-25T17:09:28,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f3a5b2944ba04d599f9a82125787c015 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f3a5b2944ba04d599f9a82125787c015 2024-11-25T17:09:28,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f3a5b2944ba04d599f9a82125787c015, entries=200, sequenceid=68, filesize=38.6 K 2024-11-25T17:09:28,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/8519a0d6e9164fedad891329d582b6e1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/8519a0d6e9164fedad891329d582b6e1 2024-11-25T17:09:28,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/8519a0d6e9164fedad891329d582b6e1, entries=150, sequenceid=68, filesize=11.7 K 2024-11-25T17:09:28,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/0affec2130284068a2c9563c2d369224 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0affec2130284068a2c9563c2d369224 2024-11-25T17:09:28,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0affec2130284068a2c9563c2d369224, entries=150, sequenceid=68, filesize=11.7 K 2024-11-25T17:09:28,157 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-25T17:09:28,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 59409bf208d66df7ccc7026d9c7a73c4 in 1354ms, sequenceid=68, compaction requested=true 2024-11-25T17:09:28,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:28,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:28,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:28,158 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:28,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:28,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:28,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:28,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:28,158 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:28,159 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141004 starting at candidate #0 after considering 3 permutations with 3 in ratio 
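A note on the behaviour recorded above: put RPCs are being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while region 59409bf208d66df7ccc7026d9c7a73c4 is flushed and its A, B and C stores are queued for compaction. In stock HBase the write-blocking threshold checked in HRegion.checkResources is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the "16 blocking" figure in the compaction-selection entries corresponds to hbase.hstore.blockingStoreFiles; the 512 K limit seen here presumably comes from a test-specific override rather than the shipped defaults. The following is a minimal, illustrative Java sketch of those knobs and of a writer retrying a rejected Put against the TestAcidGuarantees table seen in this log; the configuration values are assumed defaults and the retry loop is illustrative, not the code this test actually runs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Writes block once a region's memstore exceeds
        // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier
        // (the "Over memstore limit" messages above). Values below are the usual
        // defaults, shown for illustration only.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // "16 blocking" in the compaction-selection entries corresponds to:
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row key, family and qualifier taken from the log entries above.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Back off and retry while the region drains its memstore.
                    Thread.sleep(100L * (attempt + 1));
                }
            }
        }
    }
}

In practice the HBase client retries RegionTooBusyException internally before surfacing a failure, so an explicit catch-and-backoff like the one above only matters when client retries are disabled or exhausted.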
2024-11-25T17:09:28,159 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/A is initiating minor compaction (all files) 2024-11-25T17:09:28,159 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/A in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:28,159 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/2c6a07e6d8734ece8ff614937b946d24, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0c9eb5123683424d97c4dd8c85700a7b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/45583ed1941c4038a4f91629b50f850a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f3a5b2944ba04d599f9a82125787c015] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=137.7 K 2024-11-25T17:09:28,159 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:28,159 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/2c6a07e6d8734ece8ff614937b946d24, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0c9eb5123683424d97c4dd8c85700a7b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/45583ed1941c4038a4f91629b50f850a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f3a5b2944ba04d599f9a82125787c015] 2024-11-25T17:09:28,160 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:28,160 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c6a07e6d8734ece8ff614937b946d24, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732554564492 2024-11-25T17:09:28,160 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/B is initiating minor compaction (all files) 2024-11-25T17:09:28,160 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/B in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:28,160 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/41047f472e724b388321ed62748895e4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/bf57c70851af47e4bc3b9b3048a0ed2f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/cd4b080922df40eca9f0ddebbf5dc375, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/8519a0d6e9164fedad891329d582b6e1] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=44.6 K 2024-11-25T17:09:28,160 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c9eb5123683424d97c4dd8c85700a7b, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732554564511 2024-11-25T17:09:28,160 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 41047f472e724b388321ed62748895e4, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732554564492 2024-11-25T17:09:28,161 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45583ed1941c4038a4f91629b50f850a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732554565641 2024-11-25T17:09:28,161 DEBUG 
[RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting bf57c70851af47e4bc3b9b3048a0ed2f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732554564514 2024-11-25T17:09:28,161 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3a5b2944ba04d599f9a82125787c015, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1732554566790 2024-11-25T17:09:28,162 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cd4b080922df40eca9f0ddebbf5dc375, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732554565641 2024-11-25T17:09:28,162 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8519a0d6e9164fedad891329d582b6e1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1732554566790 2024-11-25T17:09:28,169 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:28,170 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#B#compaction#310 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:28,170 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5891127b8e654972ba532afd36a39340 is 50, key is test_row_0/B:col10/1732554566790/Put/seqid=0 2024-11-25T17:09:28,170 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411259d311e4441fc4b6e9692d51fc462f040_59409bf208d66df7ccc7026d9c7a73c4 store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:28,173 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411259d311e4441fc4b6e9692d51fc462f040_59409bf208d66df7ccc7026d9c7a73c4, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:28,173 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411259d311e4441fc4b6e9692d51fc462f040_59409bf208d66df7ccc7026d9c7a73c4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:28,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742186_1362 (size=12139) 2024-11-25T17:09:28,213 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5891127b8e654972ba532afd36a39340 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5891127b8e654972ba532afd36a39340 2024-11-25T17:09:28,220 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/B of 59409bf208d66df7ccc7026d9c7a73c4 into 5891127b8e654972ba532afd36a39340(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:28,220 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:28,220 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/B, priority=12, startTime=1732554568158; duration=0sec 2024-11-25T17:09:28,221 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:28,221 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:B 2024-11-25T17:09:28,221 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:28,222 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:28,222 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/C is initiating minor compaction (all files) 2024-11-25T17:09:28,222 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/C in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:28,222 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f1c6084ef7044e1c891c030e9d5a9112, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c2bfdc43c73a42a593bbd90de74c94d7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/cd618b4f25434ffbac8d04fdd236b451, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0affec2130284068a2c9563c2d369224] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=44.6 K 2024-11-25T17:09:28,223 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f1c6084ef7044e1c891c030e9d5a9112, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732554564492 2024-11-25T17:09:28,223 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c2bfdc43c73a42a593bbd90de74c94d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732554564514 2024-11-25T17:09:28,223 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cd618b4f25434ffbac8d04fdd236b451, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732554565641 2024-11-25T17:09:28,224 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0affec2130284068a2c9563c2d369224, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1732554566790 2024-11-25T17:09:28,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742187_1363 (size=4469) 2024-11-25T17:09:28,229 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#A#compaction#311 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:28,230 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/0a083c31fc974df593ddfc4b6a99a661 is 175, key is test_row_0/A:col10/1732554566790/Put/seqid=0 2024-11-25T17:09:28,250 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#C#compaction#312 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:28,250 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/5fb1dcb401e24d1691aa08adaf122f2d is 50, key is test_row_0/C:col10/1732554566790/Put/seqid=0 2024-11-25T17:09:28,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742188_1364 (size=31093) 2024-11-25T17:09:28,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742189_1365 (size=12139) 2024-11-25T17:09:28,282 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/5fb1dcb401e24d1691aa08adaf122f2d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fb1dcb401e24d1691aa08adaf122f2d 2024-11-25T17:09:28,287 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/C of 59409bf208d66df7ccc7026d9c7a73c4 into 5fb1dcb401e24d1691aa08adaf122f2d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:28,287 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:28,287 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/C, priority=12, startTime=1732554568158; duration=0sec 2024-11-25T17:09:28,288 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:28,288 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:C 2024-11-25T17:09:28,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-25T17:09:28,588 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-25T17:09:28,589 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:28,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-25T17:09:28,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-25T17:09:28,591 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:28,591 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:28,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:28,681 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/0a083c31fc974df593ddfc4b6a99a661 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0a083c31fc974df593ddfc4b6a99a661 2024-11-25T17:09:28,687 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/A of 59409bf208d66df7ccc7026d9c7a73c4 into 0a083c31fc974df593ddfc4b6a99a661(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:28,687 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:28,687 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/A, priority=12, startTime=1732554568158; duration=0sec 2024-11-25T17:09:28,688 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:28,688 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:A 2024-11-25T17:09:28,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-25T17:09:28,742 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:28,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-25T17:09:28,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:28,746 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-25T17:09:28,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:28,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:28,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:28,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:28,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:28,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:28,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411259b160196c880479fb735d705efa3a64f_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554566844/Put/seqid=0 2024-11-25T17:09:28,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742190_1366 (size=12154) 2024-11-25T17:09:28,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,782 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411259b160196c880479fb735d705efa3a64f_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411259b160196c880479fb735d705efa3a64f_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:28,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/531c6c3dc8564ff6a766c02ab8a51616, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:28,783 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/531c6c3dc8564ff6a766c02ab8a51616 is 175, key is test_row_0/A:col10/1732554566844/Put/seqid=0 2024-11-25T17:09:28,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742191_1367 (size=30955) 2024-11-25T17:09:28,791 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/531c6c3dc8564ff6a766c02ab8a51616 2024-11-25T17:09:28,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/643fb7d9d56945dbaa1924634a802297 is 50, key is test_row_0/B:col10/1732554566844/Put/seqid=0 2024-11-25T17:09:28,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742192_1368 (size=12001) 2024-11-25T17:09:28,812 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/643fb7d9d56945dbaa1924634a802297 2024-11-25T17:09:28,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/da37ba0f91364b48aa54173e08d03cdc is 50, key is test_row_0/C:col10/1732554566844/Put/seqid=0 2024-11-25T17:09:28,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742193_1369 (size=12001) 2024-11-25T17:09:28,825 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/da37ba0f91364b48aa54173e08d03cdc 2024-11-25T17:09:28,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/531c6c3dc8564ff6a766c02ab8a51616 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/531c6c3dc8564ff6a766c02ab8a51616 2024-11-25T17:09:28,832 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/531c6c3dc8564ff6a766c02ab8a51616, entries=150, sequenceid=92, filesize=30.2 K 2024-11-25T17:09:28,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/643fb7d9d56945dbaa1924634a802297 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/643fb7d9d56945dbaa1924634a802297 2024-11-25T17:09:28,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,836 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/643fb7d9d56945dbaa1924634a802297, entries=150, sequenceid=92, filesize=11.7 K 2024-11-25T17:09:28,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/da37ba0f91364b48aa54173e08d03cdc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/da37ba0f91364b48aa54173e08d03cdc 2024-11-25T17:09:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,842 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/da37ba0f91364b48aa54173e08d03cdc, entries=150, sequenceid=92, filesize=11.7 K 2024-11-25T17:09:28,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,843 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for 59409bf208d66df7ccc7026d9c7a73c4 in 97ms, sequenceid=92, compaction requested=false 2024-11-25T17:09:28,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:28,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:28,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-25T17:09:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-25T17:09:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-25T17:09:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 254 msec 2024-11-25T17:09:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 258 msec 2024-11-25T17:09:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-25T17:09:28,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,892 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-25T17:09:28,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,893 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,894 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:28,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-25T17:09:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-25T17:09:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,895 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:28,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,896 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:28,896 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:28,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:28,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 on port 41865 between 2024-11-25T17:09:28,909 and 2024-11-25T17:09:28,979 ...]
2024-11-25T17:09:28,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-25T17:09:28,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:28,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:29,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-25T17:09:29,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:29,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,051 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-25T17:09:29,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:29,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:29,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:29,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:29,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112532363a3659a146deaad937abbdb08a00_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_2/A:col10/1732554569051/Put/seqid=0 2024-11-25T17:09:29,063 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,065 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,068 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,071 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742194_1370 (size=9714) 2024-11-25T17:09:29,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,076 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,078 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112532363a3659a146deaad937abbdb08a00_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112532363a3659a146deaad937abbdb08a00_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:29,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/ab453e730ba7465e816660925fc2848b, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:29,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/ab453e730ba7465e816660925fc2848b is 175, key is test_row_2/A:col10/1732554569051/Put/seqid=0 2024-11-25T17:09:29,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,082 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,086 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742195_1371 (size=22359) 2024-11-25T17:09:29,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:29,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
as already flushing 2024-11-25T17:09:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:29,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554629180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554629185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554629185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554629186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554629187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-25T17:09:29,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554629289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554629296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554629296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554629300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554629302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,491 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=97, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/ab453e730ba7465e816660925fc2848b 2024-11-25T17:09:29,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554629493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-25T17:09:29,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554629504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c28b809db06d438186f3847c42057821 is 50, key is test_row_2/B:col10/1732554569051/Put/seqid=0 2024-11-25T17:09:29,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554629508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554629515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554629519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742196_1372 (size=7315) 2024-11-25T17:09:29,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554629800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554629806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554629811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554629821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:29,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554629825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:29,959 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c28b809db06d438186f3847c42057821 2024-11-25T17:09:29,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c5025ecf0cd3471ba6a9a69da173b54c is 50, key is test_row_2/C:col10/1732554569051/Put/seqid=0 2024-11-25T17:09:29,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-25T17:09:30,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742197_1373 (size=7315) 2024-11-25T17:09:30,013 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c5025ecf0cd3471ba6a9a69da173b54c 2024-11-25T17:09:30,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/ab453e730ba7465e816660925fc2848b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/ab453e730ba7465e816660925fc2848b 2024-11-25T17:09:30,027 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/ab453e730ba7465e816660925fc2848b, entries=100, sequenceid=97, filesize=21.8 K 2024-11-25T17:09:30,027 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c28b809db06d438186f3847c42057821 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c28b809db06d438186f3847c42057821 2024-11-25T17:09:30,033 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c28b809db06d438186f3847c42057821, entries=50, sequenceid=97, filesize=7.1 K 2024-11-25T17:09:30,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c5025ecf0cd3471ba6a9a69da173b54c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c5025ecf0cd3471ba6a9a69da173b54c 2024-11-25T17:09:30,042 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c5025ecf0cd3471ba6a9a69da173b54c, entries=50, sequenceid=97, filesize=7.1 K 2024-11-25T17:09:30,043 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=187.85 KB/192360 for 59409bf208d66df7ccc7026d9c7a73c4 in 994ms, sequenceid=97, compaction requested=true 2024-11-25T17:09:30,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:30,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
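The entries above show HRegion.checkResources rejecting client Mutate RPCs with RegionTooBusyException while the region's memstore sits above its blocking limit (reported here as 512.0 K; in this code path the limit is the configured region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so the low figure suggests the test tunes these far below production defaults), and the pid=110/111 flush procedures draining the memstore so writes can proceed. As a minimal client-side sketch, assuming the standard HBase 2.x Java client and reusing the table, row, family, and qualifier names that appear in this log, a write of the kind being rejected looks roughly like the following; the explicit retry/backoff loop is an illustrative assumption, not part of the test harness, and the HBase client normally also retries RegionTooBusyException internally before surfacing it (possibly wrapped) to the caller.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape as the rejected Mutate calls in the log: row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put); // rejected while the memstore is over its blocking limit
          break;
        } catch (IOException e) {
          // RegionTooBusyException may arrive directly or as the cause of a
          // retries-exhausted exception; back off and let the flush catch up.
          if (e instanceof RegionTooBusyException || e.getCause() instanceof RegionTooBusyException) {
            Thread.sleep(200L << attempt);
          } else {
            throw e;
          }
        }
      }
    }
  }
}

The relevant server-side knobs, if one wanted to reproduce a blocking limit this small outside the test, would be hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the exact values used by this test run are not visible in the excerpt and are left unspecified here.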
2024-11-25T17:09:30,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-25T17:09:30,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-25T17:09:30,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-25T17:09:30,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1610 sec 2024-11-25T17:09:30,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.1710 sec 2024-11-25T17:09:30,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:30,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-11-25T17:09:30,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:30,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:30,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:30,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:30,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:30,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:30,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554630313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:30,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554630315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:30,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411258a156af6e1bd4aaeba2c24ed151106af_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554570309/Put/seqid=0 2024-11-25T17:09:30,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:30,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554630317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:30,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742198_1374 (size=14644) 2024-11-25T17:09:30,322 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,325 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411258a156af6e1bd4aaeba2c24ed151106af_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258a156af6e1bd4aaeba2c24ed151106af_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:30,326 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/487c93c85dee41efaaa27c1e9608d4bd, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:30,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/487c93c85dee41efaaa27c1e9608d4bd is 175, key is test_row_0/A:col10/1732554570309/Put/seqid=0 2024-11-25T17:09:30,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:30,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554630326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:30,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742199_1375 (size=39599) 2024-11-25T17:09:30,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554630331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:30,347 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=64.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/487c93c85dee41efaaa27c1e9608d4bd 2024-11-25T17:09:30,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/9fd9ac629f274f7e9d0b4476b7c0d11c is 50, key is test_row_0/B:col10/1732554570309/Put/seqid=0 2024-11-25T17:09:30,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742200_1376 (size=12051) 2024-11-25T17:09:30,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/9fd9ac629f274f7e9d0b4476b7c0d11c 2024-11-25T17:09:30,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/4a194a3c57334217950ec5b0af9ecf9f is 50, key is test_row_0/C:col10/1732554570309/Put/seqid=0 2024-11-25T17:09:30,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742201_1377 (size=12051) 2024-11-25T17:09:30,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/4a194a3c57334217950ec5b0af9ecf9f 2024-11-25T17:09:30,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554630421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:30,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/487c93c85dee41efaaa27c1e9608d4bd as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/487c93c85dee41efaaa27c1e9608d4bd 2024-11-25T17:09:30,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/487c93c85dee41efaaa27c1e9608d4bd, entries=200, sequenceid=129, filesize=38.7 K 2024-11-25T17:09:30,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/9fd9ac629f274f7e9d0b4476b7c0d11c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/9fd9ac629f274f7e9d0b4476b7c0d11c 2024-11-25T17:09:30,436 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/9fd9ac629f274f7e9d0b4476b7c0d11c, entries=150, sequenceid=129, filesize=11.8 K 2024-11-25T17:09:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/4a194a3c57334217950ec5b0af9ecf9f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/4a194a3c57334217950ec5b0af9ecf9f 2024-11-25T17:09:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/4a194a3c57334217950ec5b0af9ecf9f, entries=150, sequenceid=129, filesize=11.8 K 2024-11-25T17:09:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for 59409bf208d66df7ccc7026d9c7a73c4 in 133ms, sequenceid=129, compaction requested=true 2024-11-25T17:09:30,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:30,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,444 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
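
The RegionTooBusyException entries above are the region server refusing writes while the region's memstore sits over its blocking threshold, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the flush that completes at sequenceid=129 ("Finished flush of dataSize ~194.56 KB") is what relieves that pressure, after which compaction is requested for all three stores. The exception is generally retriable from the client side, so the test's writer threads back off and retry rather than fail. A minimal configuration sketch, assuming illustrative values rather than the test's actual ones, that would yield the 512.0 K limit quoted in the exception:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  // Hypothetical settings: a 128 K flush size times a multiplier of 4 gives the
  // 512 K blocking limit reported in the RegionTooBusyException above. Production
  // defaults are far larger (128 MB flush size); tiny values like these are the
  // kind a test uses to force frequent flushes.
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // hypothetical 128 K flush size
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x the flush size
    return conf;
  }
}
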
2024-11-25T17:09:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:30,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:30,444 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:30,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:30,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:30,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,447 DEBUG 
[RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43506 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:30,447 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/B is initiating minor compaction (all files) 2024-11-25T17:09:30,448 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/B in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,448 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5891127b8e654972ba532afd36a39340, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/643fb7d9d56945dbaa1924634a802297, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c28b809db06d438186f3847c42057821, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/9fd9ac629f274f7e9d0b4476b7c0d11c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=42.5 K 2024-11-25T17:09:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,448 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5891127b8e654972ba532afd36a39340, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1732554566790 2024-11-25T17:09:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,448 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 643fb7d9d56945dbaa1924634a802297, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732554566841 2024-11-25T17:09:30,449 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c28b809db06d438186f3847c42057821, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732554569051 2024-11-25T17:09:30,449 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,449 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fd9ac629f274f7e9d0b4476b7c0d11c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732554569177 2024-11-25T17:09:30,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,454 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,458 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124006 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:30,458 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/A is initiating minor compaction (all files) 2024-11-25T17:09:30,458 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/A in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
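
Both compaction threads above start from the same picture ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking"), and the exploring policy ends up taking all four B-family files (totalSize=42.5 K). That selection is shaped by a handful of store-level settings; the sketch below uses what I believe are the stock defaults, not values confirmed by this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  public static Configuration defaultSelectionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // need at least this many eligible files
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files compacted in one pass
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio the exploring policy scores against
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the log
    return conf;
  }
}

The blockingStoreFiles limit is also why the flusher is quick to request compactions: once a store reaches 16 files, further flushes are delayed until compaction brings the count back down.
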
2024-11-25T17:09:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,458 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0a083c31fc974df593ddfc4b6a99a661, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/531c6c3dc8564ff6a766c02ab8a51616, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/ab453e730ba7465e816660925fc2848b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/487c93c85dee41efaaa27c1e9608d4bd] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=121.1 K 2024-11-25T17:09:30,458 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,458 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0a083c31fc974df593ddfc4b6a99a661, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/531c6c3dc8564ff6a766c02ab8a51616, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/ab453e730ba7465e816660925fc2848b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/487c93c85dee41efaaa27c1e9608d4bd] 2024-11-25T17:09:30,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a083c31fc974df593ddfc4b6a99a661, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1732554566790 2024-11-25T17:09:30,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 531c6c3dc8564ff6a766c02ab8a51616, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732554566841 2024-11-25T17:09:30,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab453e730ba7465e816660925fc2848b, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732554569048 2024-11-25T17:09:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,460 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 487c93c85dee41efaaa27c1e9608d4bd, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732554569176 2024-11-25T17:09:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,470 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,471 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#B#compaction#322 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,471 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c106c25eda3d4e7f9011d69fc72d88fe is 50, key is test_row_0/B:col10/1732554570309/Put/seqid=0 2024-11-25T17:09:30,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,472 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411252909eef8456941f78ccfece7b5b58b29_59409bf208d66df7ccc7026d9c7a73c4 store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
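
The PressureAwareThroughputController line above reports each compaction's average throughput against a shared cap ("1 active operations remaining, total limit is 50.00 MB/second"). That cap comes from the compaction throughput bounds; the sketch below shows what I understand to be the default bounds (the 50 MB/s figure in the log is consistent with the lower bound), not settings confirmed by this excerpt:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static Configuration defaultThrottleConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed defaults: the controller scales the allowed rate between these two
    // bounds depending on how far behind compaction has fallen.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
    return conf;
  }
}
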
2024-11-25T17:09:30,475 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411252909eef8456941f78ccfece7b5b58b29_59409bf208d66df7ccc7026d9c7a73c4, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,476 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411252909eef8456941f78ccfece7b5b58b29_59409bf208d66df7ccc7026d9c7a73c4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:30,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742202_1378 (size=12325)
2024-11-25T17:09:30,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742203_1379 (size=4469)
2024-11-25T17:09:30,491 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#A#compaction#323 average throughput is 1.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T17:09:30,492 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/661a1b1043d14f0488af6b7d9f5dddd2 is 175, key is test_row_0/A:col10/1732554570309/Put/seqid=0
2024-11-25T17:09:30,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742204_1380 (size=31279)
2024-11-25T17:09:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG message from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously from 2024-11-25T17:09:30,636 through 2024-11-25T17:09:30,713 across RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=41865) ...]
2024-11-25T17:09:30,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:30,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:30,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:30,848
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:30,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:30,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:30,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:30,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:30,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:30,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ac3f471ce852457fa349bd68d9008b40_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554570831/Put/seqid=0 2024-11-25T17:09:30,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742205_1381 (size=14794) 2024-11-25T17:09:30,894 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c106c25eda3d4e7f9011d69fc72d88fe as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c106c25eda3d4e7f9011d69fc72d88fe 2024-11-25T17:09:30,910 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/661a1b1043d14f0488af6b7d9f5dddd2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/661a1b1043d14f0488af6b7d9f5dddd2 2024-11-25T17:09:30,917 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/B of 59409bf208d66df7ccc7026d9c7a73c4 into c106c25eda3d4e7f9011d69fc72d88fe(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:30,917 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:30,917 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/B, priority=12, startTime=1732554570444; duration=0sec 2024-11-25T17:09:30,917 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:30,917 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:B 2024-11-25T17:09:30,917 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:30,925 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/A of 59409bf208d66df7ccc7026d9c7a73c4 into 661a1b1043d14f0488af6b7d9f5dddd2(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:30,925 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:30,925 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/A, priority=12, startTime=1732554570443; duration=0sec 2024-11-25T17:09:30,925 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43506 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:30,925 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:30,925 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:A 2024-11-25T17:09:30,925 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/C is initiating minor compaction (all files) 2024-11-25T17:09:30,925 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/C in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:30,925 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fb1dcb401e24d1691aa08adaf122f2d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/da37ba0f91364b48aa54173e08d03cdc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c5025ecf0cd3471ba6a9a69da173b54c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/4a194a3c57334217950ec5b0af9ecf9f] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=42.5 K 2024-11-25T17:09:30,926 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fb1dcb401e24d1691aa08adaf122f2d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=68, earliestPutTs=1732554566790 2024-11-25T17:09:30,926 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting da37ba0f91364b48aa54173e08d03cdc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732554566841 2024-11-25T17:09:30,927 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c5025ecf0cd3471ba6a9a69da173b54c, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732554569051 2024-11-25T17:09:30,927 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a194a3c57334217950ec5b0af9ecf9f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732554569177 2024-11-25T17:09:30,935 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#C#compaction#325 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:30,936 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/e851cf20085c41d2b0e9884a0d22e84e is 50, key is test_row_0/C:col10/1732554570309/Put/seqid=0 2024-11-25T17:09:30,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742206_1382 (size=12325) 2024-11-25T17:09:30,955 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/e851cf20085c41d2b0e9884a0d22e84e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/e851cf20085c41d2b0e9884a0d22e84e 2024-11-25T17:09:30,961 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/C of 59409bf208d66df7ccc7026d9c7a73c4 into e851cf20085c41d2b0e9884a0d22e84e(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:30,961 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:30,961 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/C, priority=12, startTime=1732554570445; duration=0sec 2024-11-25T17:09:30,961 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:30,962 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:C 2024-11-25T17:09:30,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:30,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554630994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-25T17:09:31,001 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-25T17:09:31,002 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:31,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-25T17:09:31,004 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:31,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-25T17:09:31,004 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:31,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:31,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:31,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554631099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-25T17:09:31,156 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:31,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-25T17:09:31,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,271 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:31,275 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ac3f471ce852457fa349bd68d9008b40_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ac3f471ce852457fa349bd68d9008b40_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:31,276 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/9c01e8566bb24e8495d1e82e5fe641b9, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:31,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/9c01e8566bb24e8495d1e82e5fe641b9 is 175, key is test_row_0/A:col10/1732554570831/Put/seqid=0 2024-11-25T17:09:31,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742207_1383 (size=39749) 2024-11-25T17:09:31,281 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=140, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/9c01e8566bb24e8495d1e82e5fe641b9 2024-11-25T17:09:31,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/719fa184e58845d78137ac4da028759b is 50, key is test_row_0/B:col10/1732554570831/Put/seqid=0 2024-11-25T17:09:31,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742208_1384 (size=12151) 2024-11-25T17:09:31,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-25T17:09:31,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:31,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554631301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:31,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-25T17:09:31,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:31,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:31,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554631318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:31,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554631318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:31,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554631342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:31,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554631342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,466 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:31,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-25T17:09:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-25T17:09:31,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:31,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554631607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:31,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:31,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-25T17:09:31,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:31,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:31,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:31,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/719fa184e58845d78137ac4da028759b 2024-11-25T17:09:31,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/464ca64464d24141b3f9cf6fb7cb3307 is 50, key is test_row_0/C:col10/1732554570831/Put/seqid=0 2024-11-25T17:09:31,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742209_1385 (size=12151) 2024-11-25T17:09:31,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/464ca64464d24141b3f9cf6fb7cb3307 2024-11-25T17:09:31,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/9c01e8566bb24e8495d1e82e5fe641b9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9c01e8566bb24e8495d1e82e5fe641b9 2024-11-25T17:09:31,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9c01e8566bb24e8495d1e82e5fe641b9, entries=200, sequenceid=140, filesize=38.8 K 2024-11-25T17:09:31,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:31,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/719fa184e58845d78137ac4da028759b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/719fa184e58845d78137ac4da028759b 2024-11-25T17:09:31,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-25T17:09:31,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
as already flushing 2024-11-25T17:09:31,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:31,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/719fa184e58845d78137ac4da028759b, entries=150, sequenceid=140, filesize=11.9 K 2024-11-25T17:09:31,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/464ca64464d24141b3f9cf6fb7cb3307 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/464ca64464d24141b3f9cf6fb7cb3307 2024-11-25T17:09:31,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/464ca64464d24141b3f9cf6fb7cb3307, entries=150, sequenceid=140, filesize=11.9 K 2024-11-25T17:09:31,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 59409bf208d66df7ccc7026d9c7a73c4 in 938ms, sequenceid=140, compaction requested=false 2024-11-25T17:09:31,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:31,930 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:31,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): 
Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-25T17:09:31,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:31,931 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:09:31,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:31,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:31,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:31,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:31,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:31,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:31,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125208475924862428db19b93aa1d35b365_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554570986/Put/seqid=0 2024-11-25T17:09:31,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742210_1386 (size=12304) 2024-11-25T17:09:31,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:31,970 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125208475924862428db19b93aa1d35b365_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125208475924862428db19b93aa1d35b365_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:31,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/7f229fb42e9b4a81b80ba3ccb25b3917, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:31,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/7f229fb42e9b4a81b80ba3ccb25b3917 is 175, key is test_row_0/A:col10/1732554570986/Put/seqid=0 2024-11-25T17:09:32,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742211_1387 (size=31105) 2024-11-25T17:09:32,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-25T17:09:32,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:32,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:32,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:32,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554632146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:32,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:32,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554632251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:32,414 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/7f229fb42e9b4a81b80ba3ccb25b3917 2024-11-25T17:09:32,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5f20c58a86d2470aac39fc963d8a41c5 is 50, key is test_row_0/B:col10/1732554570986/Put/seqid=0 2024-11-25T17:09:32,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742212_1388 (size=12151) 2024-11-25T17:09:32,429 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5f20c58a86d2470aac39fc963d8a41c5 2024-11-25T17:09:32,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c6d89b7244e74186b69d9bcd0a9aa795 is 50, key is 
test_row_0/C:col10/1732554570986/Put/seqid=0 2024-11-25T17:09:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742213_1389 (size=12151) 2024-11-25T17:09:32,440 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c6d89b7244e74186b69d9bcd0a9aa795 2024-11-25T17:09:32,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/7f229fb42e9b4a81b80ba3ccb25b3917 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/7f229fb42e9b4a81b80ba3ccb25b3917 2024-11-25T17:09:32,451 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/7f229fb42e9b4a81b80ba3ccb25b3917, entries=150, sequenceid=168, filesize=30.4 K 2024-11-25T17:09:32,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5f20c58a86d2470aac39fc963d8a41c5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5f20c58a86d2470aac39fc963d8a41c5 2024-11-25T17:09:32,455 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5f20c58a86d2470aac39fc963d8a41c5, entries=150, sequenceid=168, filesize=11.9 K 2024-11-25T17:09:32,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c6d89b7244e74186b69d9bcd0a9aa795 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c6d89b7244e74186b69d9bcd0a9aa795 2024-11-25T17:09:32,458 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c6d89b7244e74186b69d9bcd0a9aa795, entries=150, sequenceid=168, filesize=11.9 K 2024-11-25T17:09:32,459 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 59409bf208d66df7ccc7026d9c7a73c4 in 528ms, sequenceid=168, compaction requested=true 2024-11-25T17:09:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:32,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-25T17:09:32,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-25T17:09:32,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-25T17:09:32,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4560 sec 2024-11-25T17:09:32,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:32,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:09:32,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:32,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:32,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:32,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:32,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:32,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:32,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.4600 sec 2024-11-25T17:09:32,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411259676fdd531e842b98bbbd19aef197154_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554572140/Put/seqid=0 2024-11-25T17:09:32,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742214_1390 (size=14794) 2024-11-25T17:09:32,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:32,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554632589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:32,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:32,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554632702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:32,878 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:32,889 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411259676fdd531e842b98bbbd19aef197154_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411259676fdd531e842b98bbbd19aef197154_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:32,890 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/efd79ada91ae4bc09ff694952f05cea2, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:32,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/efd79ada91ae4bc09ff694952f05cea2 is 175, key is test_row_0/A:col10/1732554572140/Put/seqid=0 2024-11-25T17:09:32,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:32,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554632910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:32,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742215_1391 (size=39749) 2024-11-25T17:09:32,928 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=180, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/efd79ada91ae4bc09ff694952f05cea2 2024-11-25T17:09:32,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/6277a4d5387643dabfb3cdaeca5eb085 is 50, key is test_row_0/B:col10/1732554572140/Put/seqid=0 2024-11-25T17:09:32,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742216_1392 (size=12151) 2024-11-25T17:09:33,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/6277a4d5387643dabfb3cdaeca5eb085 2024-11-25T17:09:33,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/31c464916e40444ab3ccd873287ae503 is 50, key is test_row_0/C:col10/1732554572140/Put/seqid=0 2024-11-25T17:09:33,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742217_1393 (size=12151) 2024-11-25T17:09:33,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/31c464916e40444ab3ccd873287ae503 2024-11-25T17:09:33,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/efd79ada91ae4bc09ff694952f05cea2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/efd79ada91ae4bc09ff694952f05cea2 2024-11-25T17:09:33,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/efd79ada91ae4bc09ff694952f05cea2, entries=200, sequenceid=180, filesize=38.8 K 2024-11-25T17:09:33,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/6277a4d5387643dabfb3cdaeca5eb085 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/6277a4d5387643dabfb3cdaeca5eb085 2024-11-25T17:09:33,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/6277a4d5387643dabfb3cdaeca5eb085, entries=150, sequenceid=180, filesize=11.9 K 2024-11-25T17:09:33,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/31c464916e40444ab3ccd873287ae503 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/31c464916e40444ab3ccd873287ae503 2024-11-25T17:09:33,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/31c464916e40444ab3ccd873287ae503, entries=150, sequenceid=180, filesize=11.9 K 2024-11-25T17:09:33,090 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 59409bf208d66df7ccc7026d9c7a73c4 in 629ms, sequenceid=180, compaction requested=true 2024-11-25T17:09:33,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:33,091 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:33,092 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141882 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:33,092 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/A is initiating minor compaction (all files) 2024-11-25T17:09:33,092 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/A in 
TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,092 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/661a1b1043d14f0488af6b7d9f5dddd2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9c01e8566bb24e8495d1e82e5fe641b9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/7f229fb42e9b4a81b80ba3ccb25b3917, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/efd79ada91ae4bc09ff694952f05cea2] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=138.6 K 2024-11-25T17:09:33,092 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,092 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/661a1b1043d14f0488af6b7d9f5dddd2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9c01e8566bb24e8495d1e82e5fe641b9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/7f229fb42e9b4a81b80ba3ccb25b3917, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/efd79ada91ae4bc09ff694952f05cea2] 2024-11-25T17:09:33,093 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 661a1b1043d14f0488af6b7d9f5dddd2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732554569177 2024-11-25T17:09:33,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:33,093 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c01e8566bb24e8495d1e82e5fe641b9, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732554570831 2024-11-25T17:09:33,093 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f229fb42e9b4a81b80ba3ccb25b3917, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732554570969 2024-11-25T17:09:33,094 DEBUG 
[RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting efd79ada91ae4bc09ff694952f05cea2, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732554572134 2024-11-25T17:09:33,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:33,096 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:33,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:33,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:33,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:33,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:33,098 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48778 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:33,098 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/B is initiating minor compaction (all files) 2024-11-25T17:09:33,098 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/B in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:33,098 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c106c25eda3d4e7f9011d69fc72d88fe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/719fa184e58845d78137ac4da028759b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5f20c58a86d2470aac39fc963d8a41c5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/6277a4d5387643dabfb3cdaeca5eb085] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=47.6 K 2024-11-25T17:09:33,098 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c106c25eda3d4e7f9011d69fc72d88fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732554569177 2024-11-25T17:09:33,099 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 719fa184e58845d78137ac4da028759b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732554570831 2024-11-25T17:09:33,100 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f20c58a86d2470aac39fc963d8a41c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732554570969 2024-11-25T17:09:33,101 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6277a4d5387643dabfb3cdaeca5eb085, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732554572140 2024-11-25T17:09:33,107 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:33,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-25T17:09:33,108 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-25T17:09:33,109 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:33,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-25T17:09:33,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-25T17:09:33,111 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:33,111 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:33,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:33,132 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#B#compaction#335 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:33,132 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/2b16c313fb7044bfacf8ea0445b078b7 is 50, key is test_row_0/B:col10/1732554572140/Put/seqid=0 2024-11-25T17:09:33,140 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125af39ca702ff84e7ba49c843323066269_59409bf208d66df7ccc7026d9c7a73c4 store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:33,142 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125af39ca702ff84e7ba49c843323066269_59409bf208d66df7ccc7026d9c7a73c4, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:33,143 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125af39ca702ff84e7ba49c843323066269_59409bf208d66df7ccc7026d9c7a73c4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:33,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742218_1394 (size=12561) 2024-11-25T17:09:33,172 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/2b16c313fb7044bfacf8ea0445b078b7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/2b16c313fb7044bfacf8ea0445b078b7 2024-11-25T17:09:33,177 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/B of 59409bf208d66df7ccc7026d9c7a73c4 into 2b16c313fb7044bfacf8ea0445b078b7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:33,177 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:33,177 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/B, priority=12, startTime=1732554573096; duration=0sec 2024-11-25T17:09:33,177 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:33,177 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:B 2024-11-25T17:09:33,177 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:33,179 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48778 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:33,180 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/C is initiating minor compaction (all files) 2024-11-25T17:09:33,180 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/C in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,180 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/e851cf20085c41d2b0e9884a0d22e84e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/464ca64464d24141b3f9cf6fb7cb3307, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c6d89b7244e74186b69d9bcd0a9aa795, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/31c464916e40444ab3ccd873287ae503] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=47.6 K 2024-11-25T17:09:33,181 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e851cf20085c41d2b0e9884a0d22e84e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732554569177 2024-11-25T17:09:33,181 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 464ca64464d24141b3f9cf6fb7cb3307, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732554570831 2024-11-25T17:09:33,181 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c6d89b7244e74186b69d9bcd0a9aa795, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=168, earliestPutTs=1732554570969 2024-11-25T17:09:33,182 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 31c464916e40444ab3ccd873287ae503, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732554572140 2024-11-25T17:09:33,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742219_1395 (size=4469) 2024-11-25T17:09:33,193 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#A#compaction#334 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:33,193 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/bbf0ff700a1f439db85103f8a47ead42 is 175, key is test_row_0/A:col10/1732554572140/Put/seqid=0 2024-11-25T17:09:33,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742220_1396 (size=31515) 2024-11-25T17:09:33,204 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#C#compaction#336 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:33,204 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/2066f5bd87dd47fa805e46f6e11c4623 is 50, key is test_row_0/C:col10/1732554572140/Put/seqid=0 2024-11-25T17:09:33,205 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/bbf0ff700a1f439db85103f8a47ead42 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/bbf0ff700a1f439db85103f8a47ead42 2024-11-25T17:09:33,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-25T17:09:33,214 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/A of 59409bf208d66df7ccc7026d9c7a73c4 into bbf0ff700a1f439db85103f8a47ead42(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:33,214 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:33,214 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/A, priority=12, startTime=1732554573090; duration=0sec 2024-11-25T17:09:33,214 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:33,214 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:A 2024-11-25T17:09:33,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:09:33,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:33,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:33,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:33,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:33,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:33,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:33,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:33,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742221_1397 (size=12561) 2024-11-25T17:09:33,263 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:33,263 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/2066f5bd87dd47fa805e46f6e11c4623 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/2066f5bd87dd47fa805e46f6e11c4623 2024-11-25T17:09:33,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:33,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:33,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:33,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:33,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125bb4ecca4135b4d0195fbfdae46baf3ce_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554573222/Put/seqid=0 2024-11-25T17:09:33,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,277 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/C of 59409bf208d66df7ccc7026d9c7a73c4 into 2066f5bd87dd47fa805e46f6e11c4623(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
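[Editor's note] The mobdir/.tmp path in the flush record above indicates that column family A is MOB-enabled in this test run. As a rough sketch only (the 10-byte threshold is an arbitrary example, not the test's setting), a MOB-enabled family can be declared with the HBase 2.x descriptor builders like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) {
        // Cells larger than the MOB threshold are written to the mob area
        // (the mobdir paths seen in the log) instead of the regular HFiles.
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(10L) // example threshold in bytes
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(mobFamily)
            .build();
        System.out.println(table);
      }
    }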
2024-11-25T17:09:33,278 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:33,278 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/C, priority=12, startTime=1732554573097; duration=0sec 2024-11-25T17:09:33,278 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:33,278 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:C 2024-11-25T17:09:33,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554633295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:33,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742222_1398 (size=14794) 2024-11-25T17:09:33,309 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:33,314 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125bb4ecca4135b4d0195fbfdae46baf3ce_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125bb4ecca4135b4d0195fbfdae46baf3ce_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:33,315 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c43c93e0e848487ea4482e3c8a52e636, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:33,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c43c93e0e848487ea4482e3c8a52e636 is 175, key is test_row_0/A:col10/1732554573222/Put/seqid=0 2024-11-25T17:09:33,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554633337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:33,342 DEBUG [Thread-1565 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:33,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554633339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:33,343 DEBUG [Thread-1561 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:33,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554633346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:33,351 DEBUG [Thread-1557 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:33,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742223_1399 (size=39749) 2024-11-25T17:09:33,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554633365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:33,371 DEBUG [Thread-1563 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:33,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554633404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:33,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-25T17:09:33,427 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:33,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
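[Editor's note] The RegionTooBusyException / "Over memstore limit=512.0 K" records running through this stretch are memstore backpressure: HRegion.checkResources rejects mutations while the region's memstore exceeds its blocking limit (in stock HBase that limit derives from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the small 512.0 K figure reflects this test's deliberately tight settings), and RpcRetryingCallerImpl keeps retrying the put with backoff (tries=6 of retries=16 above). A minimal writer against this table might look like the sketch below; the retry values and cell contents are illustrative, not taken from AcidGuaranteesTestTool.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackpressureAwareWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Example client retry settings; while the region sheds memstore the
        // client retries internally rather than failing the put immediately.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // table.put() blocks through the retry loop; RegionTooBusyException
          // only surfaces to the caller once all retries are exhausted.
          table.put(put);
        }
      }
    }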
2024-11-25T17:09:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
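[Editor's note] Procedure pid=115 keeps failing with "Unable to complete flush ... as already flushing", and the master simply re-dispatches it until the in-flight flush completes. The sketch below mirrors that retry-until-accepted pattern for a caller using the public Admin API; it is a conceptual illustration only, not the master's procedure implementation, and the attempt count and sleep interval are made-up values.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class RetryingFlushSketch {
      // Hypothetical helper: re-request a table flush a few times if earlier
      // attempts fail, e.g. because the region is still busy flushing.
      static void flushWithRetries(Admin admin, TableName table, int attempts)
          throws IOException, InterruptedException {
        IOException last = null;
        for (int i = 0; i < attempts; i++) {
          try {
            admin.flush(table);
            return;               // flush accepted and completed
          } catch (IOException e) {
            last = e;             // e.g. a wrapped "Unable to complete flush ..."
            Thread.sleep(200L);   // simple fixed backoff before re-requesting
          }
        }
        throw last;
      }
    }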
2024-11-25T17:09:33,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:33,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:33,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
as already flushing 2024-11-25T17:09:33,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554633611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-25T17:09:33,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:33,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:33,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:33,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:33,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,759 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c43c93e0e848487ea4482e3c8a52e636 2024-11-25T17:09:33,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/70c58799943745fa9a6c4a16a87fbced is 50, key is test_row_0/B:col10/1732554573222/Put/seqid=0 2024-11-25T17:09:33,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742224_1400 (size=12151) 2024-11-25T17:09:33,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:33,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:33,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:33,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:33,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:33,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:33,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:33,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554633915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:34,053 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:34,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:34,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:34,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:34,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:34,054 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:34,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:34,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:34,193 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-25T17:09:34,193 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-25T17:09:34,211 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:34,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:34,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:34,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:34,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:34,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:34,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:34,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/70c58799943745fa9a6c4a16a87fbced 2024-11-25T17:09:34,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:34,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-25T17:09:34,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/64797956b02647d19bde72768286ef14 is 50, key is test_row_0/C:col10/1732554573222/Put/seqid=0 2024-11-25T17:09:34,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742225_1401 (size=12151) 2024-11-25T17:09:34,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/64797956b02647d19bde72768286ef14 2024-11-25T17:09:34,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c43c93e0e848487ea4482e3c8a52e636 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c43c93e0e848487ea4482e3c8a52e636 2024-11-25T17:09:34,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c43c93e0e848487ea4482e3c8a52e636, entries=200, sequenceid=207, filesize=38.8 K 2024-11-25T17:09:34,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/70c58799943745fa9a6c4a16a87fbced as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/70c58799943745fa9a6c4a16a87fbced 2024-11-25T17:09:34,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/70c58799943745fa9a6c4a16a87fbced, entries=150, sequenceid=207, filesize=11.9 K 2024-11-25T17:09:34,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/64797956b02647d19bde72768286ef14 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/64797956b02647d19bde72768286ef14 2024-11-25T17:09:34,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/64797956b02647d19bde72768286ef14, entries=150, sequenceid=207, filesize=11.9 K 2024-11-25T17:09:34,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): 
Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 59409bf208d66df7ccc7026d9c7a73c4 in 1129ms, sequenceid=207, compaction requested=false 2024-11-25T17:09:34,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:34,369 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:34,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-25T17:09:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:34,370 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:34,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b07c25e0897b4821b03886449379c54a_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554573252/Put/seqid=0 2024-11-25T17:09:34,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742226_1402 (size=12304) 2024-11-25T17:09:34,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:34,408 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b07c25e0897b4821b03886449379c54a_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b07c25e0897b4821b03886449379c54a_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:34,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/152a18f246ba481ab7ee82b72a514719, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:34,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/152a18f246ba481ab7ee82b72a514719 is 175, key is test_row_0/A:col10/1732554573252/Put/seqid=0 2024-11-25T17:09:34,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742227_1403 (size=31105) 2024-11-25T17:09:34,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:34,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:34,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:34,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554634548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:34,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:34,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554634652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:34,824 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=219, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/152a18f246ba481ab7ee82b72a514719 2024-11-25T17:09:34,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/4dc9778366ab48888e3b222131287426 is 50, key is test_row_0/B:col10/1732554573252/Put/seqid=0 2024-11-25T17:09:34,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742228_1404 (size=12151) 2024-11-25T17:09:34,837 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/4dc9778366ab48888e3b222131287426 2024-11-25T17:09:34,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c01ec0a6fa0e4c068585173295326bad is 50, key is test_row_0/C:col10/1732554573252/Put/seqid=0 2024-11-25T17:09:34,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742229_1405 (size=12151) 2024-11-25T17:09:34,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:34,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554634860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:35,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554635170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:35,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-25T17:09:35,251 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c01ec0a6fa0e4c068585173295326bad 2024-11-25T17:09:35,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/152a18f246ba481ab7ee82b72a514719 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/152a18f246ba481ab7ee82b72a514719 2024-11-25T17:09:35,262 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/152a18f246ba481ab7ee82b72a514719, entries=150, sequenceid=219, filesize=30.4 K 2024-11-25T17:09:35,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/4dc9778366ab48888e3b222131287426 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/4dc9778366ab48888e3b222131287426 2024-11-25T17:09:35,270 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/4dc9778366ab48888e3b222131287426, entries=150, sequenceid=219, filesize=11.9 K 2024-11-25T17:09:35,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/c01ec0a6fa0e4c068585173295326bad as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c01ec0a6fa0e4c068585173295326bad 2024-11-25T17:09:35,276 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c01ec0a6fa0e4c068585173295326bad, entries=150, sequenceid=219, filesize=11.9 K 2024-11-25T17:09:35,277 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 59409bf208d66df7ccc7026d9c7a73c4 in 907ms, sequenceid=219, compaction requested=true 2024-11-25T17:09:35,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:35,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:35,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-25T17:09:35,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-25T17:09:35,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-25T17:09:35,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1730 sec 2024-11-25T17:09:35,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.1820 sec 2024-11-25T17:09:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:35,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:09:35,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:35,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:35,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:35,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:35,682 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:35,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:35,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125f28e6c09482647ebbe1774cb84574365_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554574545/Put/seqid=0 2024-11-25T17:09:35,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742230_1406 (size=14794) 2024-11-25T17:09:35,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:35,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554635745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:35,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554635858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:36,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:36,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554636063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:36,116 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:36,139 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125f28e6c09482647ebbe1774cb84574365_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f28e6c09482647ebbe1774cb84574365_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:36,155 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/779d5da1abcd4809baab1fd2a804a9fc, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:36,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/779d5da1abcd4809baab1fd2a804a9fc is 175, key is test_row_0/A:col10/1732554574545/Put/seqid=0 2024-11-25T17:09:36,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742231_1407 (size=39749) 2024-11-25T17:09:36,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:36,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554636374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:36,573 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=245, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/779d5da1abcd4809baab1fd2a804a9fc 2024-11-25T17:09:36,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/b6cdd7cd44b34a02b259b72a1a46fcd9 is 50, key is test_row_0/B:col10/1732554574545/Put/seqid=0 2024-11-25T17:09:36,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742232_1408 (size=12151) 2024-11-25T17:09:36,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554636881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:37,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/b6cdd7cd44b34a02b259b72a1a46fcd9 2024-11-25T17:09:37,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/16b0c30691ce41719cad65aad94ebeec is 50, key is test_row_0/C:col10/1732554574545/Put/seqid=0 2024-11-25T17:09:37,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742233_1409 (size=12151) 2024-11-25T17:09:37,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-25T17:09:37,220 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-25T17:09:37,221 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:37,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-25T17:09:37,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-25T17:09:37,223 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:37,223 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:37,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-25T17:09:37,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-25T17:09:37,375 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:37,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-25T17:09:37,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:37,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:37,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:37,376 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:37,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:37,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:37,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57066 deadline: 1732554637371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:37,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:37,378 DEBUG [Thread-1561 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at 
org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:37,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57118 deadline: 1732554637379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:37,380 DEBUG [Thread-1565 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:37,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57078 deadline: 1732554637385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:37,387 DEBUG [Thread-1557 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8202 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:37,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:37,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57082 deadline: 1732554637395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:37,402 DEBUG [Thread-1563 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8215 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:37,441 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/16b0c30691ce41719cad65aad94ebeec 2024-11-25T17:09:37,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/779d5da1abcd4809baab1fd2a804a9fc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/779d5da1abcd4809baab1fd2a804a9fc 2024-11-25T17:09:37,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/779d5da1abcd4809baab1fd2a804a9fc, entries=200, sequenceid=245, filesize=38.8 K 2024-11-25T17:09:37,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/b6cdd7cd44b34a02b259b72a1a46fcd9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b6cdd7cd44b34a02b259b72a1a46fcd9 2024-11-25T17:09:37,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b6cdd7cd44b34a02b259b72a1a46fcd9, entries=150, sequenceid=245, filesize=11.9 K 2024-11-25T17:09:37,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/16b0c30691ce41719cad65aad94ebeec as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/16b0c30691ce41719cad65aad94ebeec 2024-11-25T17:09:37,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/16b0c30691ce41719cad65aad94ebeec, entries=150, sequenceid=245, filesize=11.9 K 2024-11-25T17:09:37,469 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 59409bf208d66df7ccc7026d9c7a73c4 in 1789ms, sequenceid=245, compaction requested=true 2024-11-25T17:09:37,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:37,470 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:37,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:37,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:37,470 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:37,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:37,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:37,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:37,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:37,471 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142118 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:37,471 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/A is initiating minor compaction (all files) 2024-11-25T17:09:37,471 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/A in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:37,471 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/bbf0ff700a1f439db85103f8a47ead42, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c43c93e0e848487ea4482e3c8a52e636, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/152a18f246ba481ab7ee82b72a514719, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/779d5da1abcd4809baab1fd2a804a9fc] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=138.8 K 2024-11-25T17:09:37,471 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:37,471 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/bbf0ff700a1f439db85103f8a47ead42, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c43c93e0e848487ea4482e3c8a52e636, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/152a18f246ba481ab7ee82b72a514719, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/779d5da1abcd4809baab1fd2a804a9fc] 2024-11-25T17:09:37,472 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbf0ff700a1f439db85103f8a47ead42, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732554572140 2024-11-25T17:09:37,472 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:37,472 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/B is initiating minor compaction (all files) 2024-11-25T17:09:37,472 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/B in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:37,472 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/2b16c313fb7044bfacf8ea0445b078b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/70c58799943745fa9a6c4a16a87fbced, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/4dc9778366ab48888e3b222131287426, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b6cdd7cd44b34a02b259b72a1a46fcd9] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=47.9 K 2024-11-25T17:09:37,473 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c43c93e0e848487ea4482e3c8a52e636, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732554572566 2024-11-25T17:09:37,473 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b16c313fb7044bfacf8ea0445b078b7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732554572140 2024-11-25T17:09:37,473 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 152a18f246ba481ab7ee82b72a514719, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732554573252 2024-11-25T17:09:37,474 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 70c58799943745fa9a6c4a16a87fbced, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732554572566 2024-11-25T17:09:37,474 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 779d5da1abcd4809baab1fd2a804a9fc, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732554574509 2024-11-25T17:09:37,474 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dc9778366ab48888e3b222131287426, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1732554573252 2024-11-25T17:09:37,475 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b6cdd7cd44b34a02b259b72a1a46fcd9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732554574529 2024-11-25T17:09:37,500 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:37,503 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#B#compaction#347 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:37,503 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/1f829f2810a849b7a7d2e96cbe09a4b7 is 50, key is test_row_0/B:col10/1732554574545/Put/seqid=0 2024-11-25T17:09:37,506 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112507f3eaaad8984ca2854bd78fcd228b29_59409bf208d66df7ccc7026d9c7a73c4 store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:37,508 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112507f3eaaad8984ca2854bd78fcd228b29_59409bf208d66df7ccc7026d9c7a73c4, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:37,508 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112507f3eaaad8984ca2854bd78fcd228b29_59409bf208d66df7ccc7026d9c7a73c4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:37,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742234_1410 (size=12697) 2024-11-25T17:09:37,514 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/1f829f2810a849b7a7d2e96cbe09a4b7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/1f829f2810a849b7a7d2e96cbe09a4b7 2024-11-25T17:09:37,521 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/B of 59409bf208d66df7ccc7026d9c7a73c4 into 1f829f2810a849b7a7d2e96cbe09a4b7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:37,521 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:37,521 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/B, priority=12, startTime=1732554577470; duration=0sec 2024-11-25T17:09:37,521 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:37,521 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:B 2024-11-25T17:09:37,521 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:37,522 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:37,523 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/C is initiating minor compaction (all files) 2024-11-25T17:09:37,523 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/C in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:37,523 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/2066f5bd87dd47fa805e46f6e11c4623, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/64797956b02647d19bde72768286ef14, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c01ec0a6fa0e4c068585173295326bad, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/16b0c30691ce41719cad65aad94ebeec] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=47.9 K 2024-11-25T17:09:37,523 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2066f5bd87dd47fa805e46f6e11c4623, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732554572140 2024-11-25T17:09:37,524 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 64797956b02647d19bde72768286ef14, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732554572566 2024-11-25T17:09:37,524 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c01ec0a6fa0e4c068585173295326bad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=219, earliestPutTs=1732554573252 2024-11-25T17:09:37,524 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 16b0c30691ce41719cad65aad94ebeec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732554574529 2024-11-25T17:09:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-25T17:09:37,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742235_1411 (size=4469) 2024-11-25T17:09:37,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:37,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-25T17:09:37,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:37,529 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-25T17:09:37,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:37,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:37,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:37,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:37,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:37,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:37,547 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#C#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:37,548 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/0f59bf84ee36460b8f87eb1bdfcb82af is 50, key is test_row_0/C:col10/1732554574545/Put/seqid=0 2024-11-25T17:09:37,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411256d9c1409b12e4e4b861f117e1fa05ead_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554575743/Put/seqid=0 2024-11-25T17:09:37,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742236_1412 (size=12697) 2024-11-25T17:09:37,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742237_1413 (size=12304) 2024-11-25T17:09:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-25T17:09:37,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:37,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:37,941 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#A#compaction#346 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:37,942 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/e10a01105cf74f2fbc6223e6f7030882 is 175, key is test_row_0/A:col10/1732554574545/Put/seqid=0 2024-11-25T17:09:37,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742238_1414 (size=31651) 2024-11-25T17:09:37,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:37,987 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/0f59bf84ee36460b8f87eb1bdfcb82af as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0f59bf84ee36460b8f87eb1bdfcb82af 2024-11-25T17:09:37,997 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411256d9c1409b12e4e4b861f117e1fa05ead_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411256d9c1409b12e4e4b861f117e1fa05ead_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:38,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/3ceac19326d142b1b67e321b82164f7c, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:38,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/3ceac19326d142b1b67e321b82164f7c is 175, key is test_row_0/A:col10/1732554575743/Put/seqid=0 2024-11-25T17:09:38,006 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/C of 59409bf208d66df7ccc7026d9c7a73c4 into 0f59bf84ee36460b8f87eb1bdfcb82af(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:38,006 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:38,006 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/C, priority=12, startTime=1732554577470; duration=0sec 2024-11-25T17:09:38,007 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:38,007 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:C 2024-11-25T17:09:38,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742239_1415 (size=31105) 2024-11-25T17:09:38,043 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/3ceac19326d142b1b67e321b82164f7c 2024-11-25T17:09:38,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/7c2ff4994f5042628ef6e2d3103b5fc4 is 50, key is test_row_0/B:col10/1732554575743/Put/seqid=0 2024-11-25T17:09:38,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554638085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:38,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742240_1416 (size=12151) 2024-11-25T17:09:38,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:38,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554638198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-25T17:09:38,377 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/e10a01105cf74f2fbc6223e6f7030882 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e10a01105cf74f2fbc6223e6f7030882 2024-11-25T17:09:38,388 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/A of 59409bf208d66df7ccc7026d9c7a73c4 into e10a01105cf74f2fbc6223e6f7030882(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:38,388 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:38,388 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/A, priority=12, startTime=1732554577470; duration=0sec 2024-11-25T17:09:38,388 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:38,388 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:A 2024-11-25T17:09:38,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:38,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554638406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:38,500 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/7c2ff4994f5042628ef6e2d3103b5fc4 2024-11-25T17:09:38,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/6e2e7cb744854b18bed2c42131f46af2 is 50, key is test_row_0/C:col10/1732554575743/Put/seqid=0 2024-11-25T17:09:38,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742241_1417 (size=12151) 2024-11-25T17:09:38,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:38,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554638709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:38,921 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/6e2e7cb744854b18bed2c42131f46af2 2024-11-25T17:09:38,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/3ceac19326d142b1b67e321b82164f7c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/3ceac19326d142b1b67e321b82164f7c 2024-11-25T17:09:38,958 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/3ceac19326d142b1b67e321b82164f7c, entries=150, sequenceid=256, filesize=30.4 K 2024-11-25T17:09:38,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/7c2ff4994f5042628ef6e2d3103b5fc4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/7c2ff4994f5042628ef6e2d3103b5fc4 2024-11-25T17:09:38,969 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/7c2ff4994f5042628ef6e2d3103b5fc4, entries=150, sequenceid=256, filesize=11.9 K 2024-11-25T17:09:38,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/6e2e7cb744854b18bed2c42131f46af2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6e2e7cb744854b18bed2c42131f46af2 2024-11-25T17:09:38,974 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6e2e7cb744854b18bed2c42131f46af2, entries=150, sequenceid=256, filesize=11.9 K 2024-11-25T17:09:38,975 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 59409bf208d66df7ccc7026d9c7a73c4 in 1446ms, sequenceid=256, compaction requested=false 2024-11-25T17:09:38,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:38,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:38,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-25T17:09:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-25T17:09:38,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-25T17:09:38,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7580 sec 2024-11-25T17:09:38,984 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.7620 sec 2024-11-25T17:09:39,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:39,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-25T17:09:39,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:39,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:39,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:39,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:39,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:39,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:39,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112502b07f1c31304be8a257689ac9232c60_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554578084/Put/seqid=0 2024-11-25T17:09:39,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554639237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:39,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742242_1418 (size=14994) 2024-11-25T17:09:39,251 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:39,258 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112502b07f1c31304be8a257689ac9232c60_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112502b07f1c31304be8a257689ac9232c60_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:39,259 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/e0eb252f784144779166836fffeb214c, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:39,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/e0eb252f784144779166836fffeb214c is 175, key is test_row_0/A:col10/1732554578084/Put/seqid=0 2024-11-25T17:09:39,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742243_1419 (size=39949) 2024-11-25T17:09:39,294 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=285, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/e0eb252f784144779166836fffeb214c 2024-11-25T17:09:39,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c1817c98b42440ba93d79cab406fac50 is 50, key is test_row_0/B:col10/1732554578084/Put/seqid=0 2024-11-25T17:09:39,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-25T17:09:39,328 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-25T17:09:39,329 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:39,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-25T17:09:39,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-25T17:09:39,334 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:39,335 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:39,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:39,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:39,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554639340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:39,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742244_1420 (size=12301) 2024-11-25T17:09:39,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-25T17:09:39,493 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:39,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-25T17:09:39,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:39,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:39,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
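The RegionTooBusyException entries above are raised by HRegion.checkResources when the region's memstore exceeds its blocking size (here 512.0 K, presumably configured deliberately small for this test; in a stock setup the blocking size is the flush size multiplied by hbase.hregion.memstore.block.multiplier). A minimal client-side sketch of handling it, assuming a standard HBase 2.x client and that the exception propagates to the caller rather than being absorbed by the client's own retry loop; the table name and row come from the test rows seen in this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempt = 0;
      while (true) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; back off so the flush can drain it.
          if (++attempt >= 5) {
            throw e;
          }
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}
```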
2024-11-25T17:09:39,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:39,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554639544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:39,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-25T17:09:39,661 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:39,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-25T17:09:39,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:39,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:39,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:39,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:39,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c1817c98b42440ba93d79cab406fac50 2024-11-25T17:09:39,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/5fbadeab4ec54b3d9386c3b8901f97e6 is 50, key is test_row_0/C:col10/1732554578084/Put/seqid=0 2024-11-25T17:09:39,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742245_1421 (size=12301) 2024-11-25T17:09:39,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:39,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-25T17:09:39,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:39,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:39,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:39,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
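The pid=118/119 entries trace a client-requested table flush: the master stores a FlushTableProcedure, which dispatches a FlushRegionProcedure to the region server; while MemStoreFlusher.0 is still writing out the previous flush, the callable answers "NOT flushing ... as already flushing" and the subprocedure is retried until the in-progress flush completes. A minimal sketch of issuing that flush from a client, assuming the standard Admin API (the table name comes from this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // On the master this becomes a FlushTableProcedure with one
      // FlushRegionProcedure subprocedure per region of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```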
2024-11-25T17:09:39,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554639853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-25T17:09:39,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:39,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-25T17:09:39,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:39,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:39,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:39,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:40,133 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:40,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-25T17:09:40,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:40,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:40,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:40,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:40,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:40,197 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/5fbadeab4ec54b3d9386c3b8901f97e6 2024-11-25T17:09:40,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/e0eb252f784144779166836fffeb214c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e0eb252f784144779166836fffeb214c 2024-11-25T17:09:40,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e0eb252f784144779166836fffeb214c, entries=200, sequenceid=285, filesize=39.0 K 2024-11-25T17:09:40,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/c1817c98b42440ba93d79cab406fac50 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c1817c98b42440ba93d79cab406fac50 2024-11-25T17:09:40,275 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c1817c98b42440ba93d79cab406fac50, entries=150, sequenceid=285, filesize=12.0 K 2024-11-25T17:09:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/5fbadeab4ec54b3d9386c3b8901f97e6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fbadeab4ec54b3d9386c3b8901f97e6 2024-11-25T17:09:40,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fbadeab4ec54b3d9386c3b8901f97e6, entries=150, sequenceid=285, filesize=12.0 K 2024-11-25T17:09:40,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 59409bf208d66df7ccc7026d9c7a73c4 in 1072ms, sequenceid=285, compaction requested=true 2024-11-25T17:09:40,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:09:40,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-25T17:09:40,287 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:40,287 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-25T17:09:40,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,288 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:40,288 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/C is initiating minor compaction (all files) 2024-11-25T17:09:40,288 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/C in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:40,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,289 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0f59bf84ee36460b8f87eb1bdfcb82af, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6e2e7cb744854b18bed2c42131f46af2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fbadeab4ec54b3d9386c3b8901f97e6] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=36.3 K 2024-11-25T17:09:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,289 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f59bf84ee36460b8f87eb1bdfcb82af, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732554574529 2024-11-25T17:09:40,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,289 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:40,290 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/A is initiating minor compaction (all files) 
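The compaction lines above show the ExploringCompactionPolicy selecting all three eligible store files per family for a minor compaction, with "16 blocking" reflecting the store-file count at which writes are throttled. A sketch of the configuration keys involved, assuming the standard property names; the values shown are the usual defaults and appear to match this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is selected.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files merged in one minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store-file count above which updates block until compaction catches up ("16 blocking" above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min=" + conf.get("hbase.hstore.compaction.min"));
  }
}
```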
2024-11-25T17:09:40,290 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/A in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:40,290 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e10a01105cf74f2fbc6223e6f7030882, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/3ceac19326d142b1b67e321b82164f7c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e0eb252f784144779166836fffeb214c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=100.3 K 2024-11-25T17:09:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,290 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:40,290 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e10a01105cf74f2fbc6223e6f7030882, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/3ceac19326d142b1b67e321b82164f7c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e0eb252f784144779166836fffeb214c] 2024-11-25T17:09:40,290 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e2e7cb744854b18bed2c42131f46af2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732554575719 2024-11-25T17:09:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,290 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e10a01105cf74f2fbc6223e6f7030882, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732554574529 2024-11-25T17:09:40,290 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fbadeab4ec54b3d9386c3b8901f97e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732554578064 2024-11-25T17:09:40,291 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ceac19326d142b1b67e321b82164f7c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732554575719 2024-11-25T17:09:40,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,292 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0eb252f784144779166836fffeb214c, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732554578029 2024-11-25T17:09:40,292 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:40,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-25T17:09:40,292 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,293 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-25T17:09:40,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:40,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:40,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:40,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:40,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:40,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:40,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:40,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411253fe10e8fbdf8475990f43dfdb84c44bc_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554579223/Put/seqid=0 2024-11-25T17:09:40,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,305 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,311 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,314 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:40,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,323 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#C#compaction#357 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:40,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,323 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/81a44d96b2924d97b2a32413bbe43b52 is 50, key is test_row_0/C:col10/1732554578084/Put/seqid=0 2024-11-25T17:09:40,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,335 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,342 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,353 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,358 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125e0f090c55d514d5a8a11aa7fff358ba7_59409bf208d66df7ccc7026d9c7a73c4 store=[table=TestAcidGuarantees family=A 
region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:40,360 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125e0f090c55d514d5a8a11aa7fff358ba7_59409bf208d66df7ccc7026d9c7a73c4, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:40,360 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125e0f090c55d514d5a8a11aa7fff358ba7_59409bf208d66df7ccc7026d9c7a73c4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:40,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:40,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
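The DEBUG line above is emitted each time an RPC handler resolves the store file tracker for a store, which is consistent with the message recurring for every handler call throughout this window of the log. As a rough, generic illustration only (none of the class or property names below are HBase's, and the real StoreFileTrackerFactory API is not reproduced here), the pattern behind a message like "instantiating StoreFileTracker impl <class>" is a factory that reads an implementation class name from configuration, falls back to a default, and instantiates it reflectively:

    // Minimal, self-contained sketch (not HBase's actual StoreFileTrackerFactory):
    // it only illustrates the config-driven, reflective instantiation pattern that
    // produces log lines like "instantiating StoreFileTracker impl <class>".
    public class TrackerFactorySketch {

        // Stand-in for the tracker contract; the real HBase interface differs.
        public interface Tracker {
            String name();
        }

        // Stand-in default implementation, analogous in spirit to DefaultStoreFileTracker.
        public static class DefaultTracker implements Tracker {
            public String name() { return "default"; }
        }

        // Hypothetical config key used only for this sketch.
        static final String IMPL_KEY = "sketch.tracker.impl";

        public static Tracker create(java.util.Properties conf) throws Exception {
            String impl = conf.getProperty(IMPL_KEY, DefaultTracker.class.getName());
            System.out.println("instantiating tracker impl " + impl); // mirrors the DEBUG line
            return (Tracker) Class.forName(impl).getDeclaredConstructor().newInstance();
        }

        public static void main(String[] args) throws Exception {
            java.util.Properties conf = new java.util.Properties(); // no key set -> default impl
            System.out.println(create(conf).name());
        }
    }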
2024-11-25T17:09:40,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742246_1422 (size=9914)
2024-11-25T17:09:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:40,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742247_1423 (size=12949)
2024-11-25T17:09:40,418 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/81a44d96b2924d97b2a32413bbe43b52 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/81a44d96b2924d97b2a32413bbe43b52
2024-11-25T17:09:40,423 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/C of 59409bf208d66df7ccc7026d9c7a73c4 into 81a44d96b2924d97b2a32413bbe43b52(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T17:09:40,423 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4:
2024-11-25T17:09:40,423 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/C, priority=13, startTime=1732554580287; duration=0sec
2024-11-25T17:09:40,423 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-25T17:09:40,423 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:C
2024-11-25T17:09:40,423 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T17:09:40,424 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T17:09:40,425 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/B is initiating minor compaction (all files)
2024-11-25T17:09:40,425 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/B in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.
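The SortedCompactionPolicy/ExploringCompactionPolicy lines above record how the minor-compaction input set for store B was chosen: 3 eligible files were examined and a 3-file window totalling 37149 bytes was the one found "in ratio". A minimal standalone sketch of that size-ratio rule follows (a simplification, not the actual ExploringCompactionPolicy code; the sizes used are illustrative, not the real byte counts behind the line above):

    // Simplified sketch of the "ratio" rule used by exploring-style compaction
    // selection: a window of store files qualifies when every file is at most
    // `ratio` times the combined size of the other files in the window.
    public class RatioSelectionSketch {

        static boolean withinRatio(long[] sizes, double ratio) {
            long total = 0;
            for (long s : sizes) total += s;
            for (long s : sizes) {
                if (s > ratio * (total - s)) return false; // one file dominates the window
            }
            return true;
        }

        public static void main(String[] args) {
            long[] window = {12400, 11900, 12000};        // three similarly sized files
            long[] skewed = {120000, 11900, 12000};       // one oversized file
            System.out.println(withinRatio(window, 1.2)); // true: eligible for minor compaction
            System.out.println(withinRatio(skewed, 1.2)); // false: rejected by the ratio check
        }
    }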
2024-11-25T17:09:40,425 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/1f829f2810a849b7a7d2e96cbe09a4b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/7c2ff4994f5042628ef6e2d3103b5fc4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c1817c98b42440ba93d79cab406fac50] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=36.3 K
2024-11-25T17:09:40,425 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f829f2810a849b7a7d2e96cbe09a4b7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732554574529
2024-11-25T17:09:40,426 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c2ff4994f5042628ef6e2d3103b5fc4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732554575719
2024-11-25T17:09:40,428 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c1817c98b42440ba93d79cab406fac50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732554578064
2024-11-25T17:09:40,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742248_1424 (size=4469)
2024-11-25T17:09:40,460 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#B#compaction#358 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-25T17:09:40,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:40,461 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/3ff6c418b8b145a5b1dd21da7fd5dbfd is 50, key is test_row_0/B:col10/1732554578084/Put/seqid=0
2024-11-25T17:09:40,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
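The PressureAwareThroughputController line above reports an average compaction write rate of 6.55 MB/second against a 50.00 MB/second limit, so no throttling sleeps were needed. A simplified sketch of that arithmetic follows (an assumption-level illustration, not the controller's real implementation):

    // Rough sketch of throughput throttling: if the bytes written so far imply a
    // rate above the configured limit, sleep long enough to fall back under it;
    // otherwise do not sleep at all (which matches "slept 0 time(s)" above).
    public class ThrottleSketch {

        // Extra milliseconds to sleep so that bytes / (elapsed + sleep) <= limit.
        static long sleepMillisNeeded(long bytesWritten, long elapsedMillis, double limitBytesPerSec) {
            double minMillis = bytesWritten / limitBytesPerSec * 1000.0; // time the write "should" take
            return Math.max(0L, (long) Math.ceil(minMillis - elapsedMillis));
        }

        public static void main(String[] args) {
            double limit = 50.0 * 1024 * 1024; // 50.00 MB/second, as reported in the log
            // ~6.55 MB written in one second -> already below the limit -> 0 ms of sleep
            System.out.println(sleepMillisNeeded((long) (6.55 * 1024 * 1024), 1000, limit));
            // 100 MB written in one second -> ~1000 ms more sleep needed to average 50 MB/s
            System.out.println(sleepMillisNeeded(100L * 1024 * 1024, 1000, limit));
        }
    }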
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742249_1425 (size=12949) 2024-11-25T17:09:40,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,501 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/3ff6c418b8b145a5b1dd21da7fd5dbfd as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/3ff6c418b8b145a5b1dd21da7fd5dbfd 2024-11-25T17:09:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,507 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/B of 59409bf208d66df7ccc7026d9c7a73c4 into 3ff6c418b8b145a5b1dd21da7fd5dbfd(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:40,507 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:40,507 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/B, priority=13, startTime=1732554580287; duration=0sec 2024-11-25T17:09:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,507 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:40,507 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:B 2024-11-25T17:09:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:40,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554640730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:40,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:40,806 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411253fe10e8fbdf8475990f43dfdb84c44bc_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411253fe10e8fbdf8475990f43dfdb84c44bc_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:40,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/251a5609ee9a4d6489e543940d0a6fba, store: [table=TestAcidGuarantees 
family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:40,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/251a5609ee9a4d6489e543940d0a6fba is 175, key is test_row_0/A:col10/1732554579223/Put/seqid=0 2024-11-25T17:09:40,857 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#A#compaction#356 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:40,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554640842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:40,858 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/117f7fac2b364cdb8d0099cdc631aad1 is 175, key is test_row_0/A:col10/1732554578084/Put/seqid=0 2024-11-25T17:09:40,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742250_1426 (size=22561) 2024-11-25T17:09:40,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742251_1427 (size=31903) 2024-11-25T17:09:41,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is 
too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:41,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554641059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:41,259 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/251a5609ee9a4d6489e543940d0a6fba 2024-11-25T17:09:41,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/b28ab00182314b54a3e2a11a1a5ac1b2 is 50, key is test_row_0/B:col10/1732554579223/Put/seqid=0 2024-11-25T17:09:41,325 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/117f7fac2b364cdb8d0099cdc631aad1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/117f7fac2b364cdb8d0099cdc631aad1 2024-11-25T17:09:41,333 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/A of 59409bf208d66df7ccc7026d9c7a73c4 into 117f7fac2b364cdb8d0099cdc631aad1(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-11-25T17:09:41,334 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:41,334 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/A, priority=13, startTime=1732554580287; duration=1sec 2024-11-25T17:09:41,334 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:41,334 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:A 2024-11-25T17:09:41,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742252_1428 (size=9857) 2024-11-25T17:09:41,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:41,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554641365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:41,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-25T17:09:41,746 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/b28ab00182314b54a3e2a11a1a5ac1b2 2024-11-25T17:09:41,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f8f90bff865f42dd819ea03ff6876df6 is 50, key is test_row_0/C:col10/1732554579223/Put/seqid=0 2024-11-25T17:09:41,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742253_1429 (size=9857) 2024-11-25T17:09:41,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:41,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554641874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:42,215 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f8f90bff865f42dd819ea03ff6876df6 2024-11-25T17:09:42,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/251a5609ee9a4d6489e543940d0a6fba as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/251a5609ee9a4d6489e543940d0a6fba 2024-11-25T17:09:42,236 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/251a5609ee9a4d6489e543940d0a6fba, entries=100, sequenceid=294, filesize=22.0 K 2024-11-25T17:09:42,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/b28ab00182314b54a3e2a11a1a5ac1b2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b28ab00182314b54a3e2a11a1a5ac1b2 2024-11-25T17:09:42,241 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b28ab00182314b54a3e2a11a1a5ac1b2, entries=100, sequenceid=294, filesize=9.6 K 2024-11-25T17:09:42,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f8f90bff865f42dd819ea03ff6876df6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f8f90bff865f42dd819ea03ff6876df6 2024-11-25T17:09:42,246 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f8f90bff865f42dd819ea03ff6876df6, entries=100, sequenceid=294, filesize=9.6 K 2024-11-25T17:09:42,246 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 59409bf208d66df7ccc7026d9c7a73c4 in 1954ms, sequenceid=294, compaction requested=false 2024-11-25T17:09:42,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:42,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:42,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-25T17:09:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-25T17:09:42,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-25T17:09:42,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9130 sec 2024-11-25T17:09:42,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.9210 sec 2024-11-25T17:09:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:42,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-25T17:09:42,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:42,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:42,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:42,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:42,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:42,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:42,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411254364b71778dc492ab85123e8a4a55e11_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554580708/Put/seqid=0 2024-11-25T17:09:42,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:42,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554642934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:42,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742254_1430 (size=12454) 2024-11-25T17:09:42,966 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:42,974 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411254364b71778dc492ab85123e8a4a55e11_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411254364b71778dc492ab85123e8a4a55e11_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:42,975 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/6c7b1e16eb354d108cda3068be74db51, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:42,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/6c7b1e16eb354d108cda3068be74db51 is 175, key is test_row_0/A:col10/1732554580708/Put/seqid=0 2024-11-25T17:09:43,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742255_1431 (size=31255) 2024-11-25T17:09:43,003 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/6c7b1e16eb354d108cda3068be74db51 2024-11-25T17:09:43,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/f8d80087baa04a48b4a622a8ef4be713 is 50, key is test_row_0/B:col10/1732554580708/Put/seqid=0 2024-11-25T17:09:43,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:43,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554643042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:43,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742256_1432 (size=12301) 2024-11-25T17:09:43,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/f8d80087baa04a48b4a622a8ef4be713 2024-11-25T17:09:43,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/beac34c654094c129a999a907fc9e7cb is 50, key is test_row_0/C:col10/1732554580708/Put/seqid=0 2024-11-25T17:09:43,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742257_1433 (size=12301) 2024-11-25T17:09:43,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/beac34c654094c129a999a907fc9e7cb 2024-11-25T17:09:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/6c7b1e16eb354d108cda3068be74db51 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/6c7b1e16eb354d108cda3068be74db51 2024-11-25T17:09:43,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/6c7b1e16eb354d108cda3068be74db51, entries=150, sequenceid=325, filesize=30.5 K 2024-11-25T17:09:43,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/f8d80087baa04a48b4a622a8ef4be713 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/f8d80087baa04a48b4a622a8ef4be713 2024-11-25T17:09:43,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/f8d80087baa04a48b4a622a8ef4be713, entries=150, sequenceid=325, filesize=12.0 K 2024-11-25T17:09:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/beac34c654094c129a999a907fc9e7cb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/beac34c654094c129a999a907fc9e7cb 2024-11-25T17:09:43,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,141 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/beac34c654094c129a999a907fc9e7cb, entries=150, sequenceid=325, filesize=12.0 K 2024-11-25T17:09:43,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 59409bf208d66df7ccc7026d9c7a73c4 in 246ms, sequenceid=325, compaction requested=true 2024-11-25T17:09:43,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:43,144 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,145 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:43,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:43,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:43,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,145 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:43,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:43,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:43,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:43,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:43,146 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85719 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:43,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,146 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/A is initiating minor compaction (all files) 2024-11-25T17:09:43,146 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/A in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
2024-11-25T17:09:43,146 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/117f7fac2b364cdb8d0099cdc631aad1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/251a5609ee9a4d6489e543940d0a6fba, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/6c7b1e16eb354d108cda3068be74db51] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=83.7 K 2024-11-25T17:09:43,146 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:43,146 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/117f7fac2b364cdb8d0099cdc631aad1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/251a5609ee9a4d6489e543940d0a6fba, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/6c7b1e16eb354d108cda3068be74db51] 2024-11-25T17:09:43,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,146 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 117f7fac2b364cdb8d0099cdc631aad1, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732554578064 2024-11-25T17:09:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,147 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 251a5609ee9a4d6489e543940d0a6fba, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554579223 2024-11-25T17:09:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,147 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:43,147 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/B is initiating minor compaction (all files) 2024-11-25T17:09:43,147 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/B in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:43,148 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/3ff6c418b8b145a5b1dd21da7fd5dbfd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b28ab00182314b54a3e2a11a1a5ac1b2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/f8d80087baa04a48b4a622a8ef4be713] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=34.3 K 2024-11-25T17:09:43,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,148 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c7b1e16eb354d108cda3068be74db51, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732554580694 2024-11-25T17:09:43,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,148 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ff6c418b8b145a5b1dd21da7fd5dbfd, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732554578064 2024-11-25T17:09:43,149 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b28ab00182314b54a3e2a11a1a5ac1b2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554579223 2024-11-25T17:09:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,149 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f8d80087baa04a48b4a622a8ef4be713, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732554580694 2024-11-25T17:09:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,178 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:43,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,180 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411253494603f44c24ee78ed32a17b5dab774_59409bf208d66df7ccc7026d9c7a73c4 store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:43,180 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#B#compaction#365 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:43,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,182 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411253494603f44c24ee78ed32a17b5dab774_59409bf208d66df7ccc7026d9c7a73c4, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:43,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,182 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411253494603f44c24ee78ed32a17b5dab774_59409bf208d66df7ccc7026d9c7a73c4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:43,183 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/71ab3b475d4f493590db7bc5778913d2 is 50, key is test_row_0/B:col10/1732554580708/Put/seqid=0 2024-11-25T17:09:43,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,185 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,195 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,203 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,208 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,213 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,219 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,223 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,228 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,231 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,235 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,241 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,245 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742258_1434 (size=13051)
2024-11-25T17:09:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742259_1435 (size=4469)
2024-11-25T17:09:43,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,253 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#A#compaction#364 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T17:09:43,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,253 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/9cee2b8aca5741f4839c928f4850473a is 175, key is test_row_0/A:col10/1732554580708/Put/seqid=0
2024-11-25T17:09:43,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-25T17:09:43,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742260_1436 (size=32005) 2024-11-25T17:09:43,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,290 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/9cee2b8aca5741f4839c928f4850473a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9cee2b8aca5741f4839c928f4850473a 2024-11-25T17:09:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:09:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,295 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/A of 59409bf208d66df7ccc7026d9c7a73c4 into 9cee2b8aca5741f4839c928f4850473a(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,295 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:43,295 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/A, priority=13, startTime=1732554583145; duration=0sec 2024-11-25T17:09:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,295 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:43,295 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:A 2024-11-25T17:09:43,296 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:43,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,296 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:43,296 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/C is initiating minor compaction (all files) 2024-11-25T17:09:43,297 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/C in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
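The short-compactions thread above selects the three eligible files in each store and rewrites them into a single HFile. For orientation only, here is a minimal client-side sketch of requesting a compaction of the same table and polling its state through the public Admin API; the table name comes from the log, the class name and the one-second poll interval are arbitrary, and this code is not part of the recorded test run.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                    // request a minor compaction, like the ones logged here
          CompactionState state = admin.getCompactionState(table);
          while (state != CompactionState.NONE) {  // poll until the region server reports it is done
            Thread.sleep(1000);
            state = admin.getCompactionState(table);
          }
        }
      }
    }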
2024-11-25T17:09:43,297 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/81a44d96b2924d97b2a32413bbe43b52, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f8f90bff865f42dd819ea03ff6876df6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/beac34c654094c129a999a907fc9e7cb] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=34.3 K 2024-11-25T17:09:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,297 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81a44d96b2924d97b2a32413bbe43b52, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732554578064 2024-11-25T17:09:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,297 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8f90bff865f42dd819ea03ff6876df6, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732554579223 2024-11-25T17:09:43,298 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting beac34c654094c129a999a907fc9e7cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732554580694 2024-11-25T17:09:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:43,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:43,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:43,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:43,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:43,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:43,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:43,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:43,326 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
59409bf208d66df7ccc7026d9c7a73c4#C#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:43,327 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/6fea467f7b19475b8b5655d8c6a97b85 is 50, key is test_row_0/C:col10/1732554580708/Put/seqid=0 2024-11-25T17:09:43,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125e848a5a0ba834ae6af5662b5e1cb786c_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554583304/Put/seqid=0 2024-11-25T17:09:43,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742261_1437 (size=13051) 2024-11-25T17:09:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742262_1438 (size=14994) 2024-11-25T17:09:43,381 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:43,388 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125e848a5a0ba834ae6af5662b5e1cb786c_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e848a5a0ba834ae6af5662b5e1cb786c_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:43,389 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f8dca733b6ac45b982bc70d171664951, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:43,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f8dca733b6ac45b982bc70d171664951 is 175, key is test_row_0/A:col10/1732554583304/Put/seqid=0 2024-11-25T17:09:43,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742263_1439 (size=39949) 2024-11-25T17:09:43,395 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=337, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f8dca733b6ac45b982bc70d171664951 2024-11-25T17:09:43,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5cac7cc874a647ed819af6b9c03676ee is 50, key is test_row_0/B:col10/1732554583304/Put/seqid=0 2024-11-25T17:09:43,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742264_1440 (size=12301) 2024-11-25T17:09:43,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5cac7cc874a647ed819af6b9c03676ee 2024-11-25T17:09:43,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f4381d54e52845c08533b6b4892f349b is 50, key is test_row_0/C:col10/1732554583304/Put/seqid=0 2024-11-25T17:09:43,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-25T17:09:43,464 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-25T17:09:43,470 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:43,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742265_1441 (size=12301) 2024-11-25T17:09:43,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-25T17:09:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-25T17:09:43,476 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:43,477 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:43,477 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:43,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:43,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 313 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554643516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:43,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-25T17:09:43,629 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:43,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-25T17:09:43,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:43,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:43,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:43,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:43,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:43,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:43,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:43,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 315 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554643627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:43,652 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/71ab3b475d4f493590db7bc5778913d2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/71ab3b475d4f493590db7bc5778913d2 2024-11-25T17:09:43,657 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/B of 59409bf208d66df7ccc7026d9c7a73c4 into 71ab3b475d4f493590db7bc5778913d2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:43,657 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:43,657 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/B, priority=13, startTime=1732554583145; duration=0sec 2024-11-25T17:09:43,657 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:43,657 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:B 2024-11-25T17:09:43,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-25T17:09:43,781 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/6fea467f7b19475b8b5655d8c6a97b85 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6fea467f7b19475b8b5655d8c6a97b85 2024-11-25T17:09:43,785 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:43,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-25T17:09:43,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
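The callId 313/315 entries show client mutations being rejected with RegionTooBusyException while the region's memstore is over its blocking limit. The stock HBase client retries such calls internally, so this is purely an illustration of the failure mode: a hedged sketch of an explicit retry-with-backoff around a single Put. The row, family, and qualifier names are taken from the log; the value, attempt count, and backoff numbers are arbitrary.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;                               // write accepted
            } catch (RegionTooBusyException e) {   // memstore over its blocking limit, as in the log
              if (attempt >= 5) throw e;           // give up after a few tries
              Thread.sleep(backoffMs);
              backoffMs *= 2;                      // simple exponential backoff
            }
          }
        }
      }
    }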
2024-11-25T17:09:43,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. as already flushing 2024-11-25T17:09:43,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:43,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:43,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:43,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:43,796 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/C of 59409bf208d66df7ccc7026d9c7a73c4 into 6fea467f7b19475b8b5655d8c6a97b85(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:43,797 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:43,797 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/C, priority=13, startTime=1732554583145; duration=0sec 2024-11-25T17:09:43,797 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:43,797 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:C 2024-11-25T17:09:43,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:43,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 317 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554643836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:43,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f4381d54e52845c08533b6b4892f349b 2024-11-25T17:09:43,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/f8dca733b6ac45b982bc70d171664951 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f8dca733b6ac45b982bc70d171664951 2024-11-25T17:09:43,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f8dca733b6ac45b982bc70d171664951, entries=200, sequenceid=337, filesize=39.0 K 2024-11-25T17:09:43,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/5cac7cc874a647ed819af6b9c03676ee as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5cac7cc874a647ed819af6b9c03676ee 2024-11-25T17:09:43,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5cac7cc874a647ed819af6b9c03676ee, entries=150, sequenceid=337, filesize=12.0 K 2024-11-25T17:09:43,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/f4381d54e52845c08533b6b4892f349b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f4381d54e52845c08533b6b4892f349b 2024-11-25T17:09:43,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f4381d54e52845c08533b6b4892f349b, entries=150, sequenceid=337, filesize=12.0 K 2024-11-25T17:09:43,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 59409bf208d66df7ccc7026d9c7a73c4 in 584ms, sequenceid=337, compaction requested=false 2024-11-25T17:09:43,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:43,944 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:43,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-25T17:09:43,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:43,949 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:09:43,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:43,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:43,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:43,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:43,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:43,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:43,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125036d01c6f5c24df5aeb7c1d291c030c9_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554583506/Put/seqid=0 2024-11-25T17:09:44,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742266_1442 (size=12454) 2024-11-25T17:09:44,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:44,045 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125036d01c6f5c24df5aeb7c1d291c030c9_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125036d01c6f5c24df5aeb7c1d291c030c9_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c8a7ac5f907840f9b03b43fa69bc80a0, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:44,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c8a7ac5f907840f9b03b43fa69bc80a0 is 175, key is test_row_0/A:col10/1732554583506/Put/seqid=0 2024-11-25T17:09:44,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-25T17:09:44,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742267_1443 (size=31255) 2024-11-25T17:09:44,091 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c8a7ac5f907840f9b03b43fa69bc80a0 2024-11-25T17:09:44,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/fe20b44f1d284f50859902053a6ae01e is 50, key is test_row_0/B:col10/1732554583506/Put/seqid=0 2024-11-25T17:09:44,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:44,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
as already flushing 2024-11-25T17:09:44,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742268_1444 (size=12301) 2024-11-25T17:09:44,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:44,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 327 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554644200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:44,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:44,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554644309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:44,497 DEBUG [Thread-1568 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:56265 2024-11-25T17:09:44,497 DEBUG [Thread-1568 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:44,497 DEBUG [Thread-1572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:56265 2024-11-25T17:09:44,497 DEBUG [Thread-1572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:44,501 DEBUG [Thread-1576 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:56265 2024-11-25T17:09:44,501 DEBUG [Thread-1576 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:44,502 DEBUG [Thread-1574 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:56265 2024-11-25T17:09:44,502 DEBUG [Thread-1574 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:44,505 DEBUG [Thread-1570 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:56265 2024-11-25T17:09:44,505 DEBUG [Thread-1570 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:44,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:44,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:57142 deadline: 1732554644517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:44,568 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/fe20b44f1d284f50859902053a6ae01e 2024-11-25T17:09:44,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/86e5862c462c48c099fd85b14117dd06 is 50, key is test_row_0/C:col10/1732554583506/Put/seqid=0 2024-11-25T17:09:44,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742269_1445 (size=12301) 2024-11-25T17:09:44,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-25T17:09:44,581 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/86e5862c462c48c099fd85b14117dd06 2024-11-25T17:09:44,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/c8a7ac5f907840f9b03b43fa69bc80a0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c8a7ac5f907840f9b03b43fa69bc80a0 2024-11-25T17:09:44,588 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c8a7ac5f907840f9b03b43fa69bc80a0, entries=150, sequenceid=364, filesize=30.5 K 2024-11-25T17:09:44,588 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/fe20b44f1d284f50859902053a6ae01e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fe20b44f1d284f50859902053a6ae01e 2024-11-25T17:09:44,591 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fe20b44f1d284f50859902053a6ae01e, entries=150, sequenceid=364, filesize=12.0 K 2024-11-25T17:09:44,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/86e5862c462c48c099fd85b14117dd06 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/86e5862c462c48c099fd85b14117dd06 2024-11-25T17:09:44,594 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/86e5862c462c48c099fd85b14117dd06, entries=150, sequenceid=364, filesize=12.0 K 2024-11-25T17:09:44,595 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 59409bf208d66df7ccc7026d9c7a73c4 in 646ms, sequenceid=364, compaction requested=true 2024-11-25T17:09:44,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:44,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
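Side note on the RegionTooBusyException entries above: the server is rejecting Mutate calls while the region sits over its 512.0 K memstore limit, and the calls succeed again once the flush logged here completes. The stock HBase client already retries this exception internally; what follows is only a minimal application-level sketch of an explicit backoff, assuming a standard HBase 2.x client on the classpath and reusing the table/row/family names from this test. Depending on client retry settings the exception may surface wrapped in a retries-exhausted exception rather than directly, so the sketch catches IOException broadly.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionBackoffSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                                // rejected while the memstore is over its limit
              break;
            } catch (IOException e) {                        // RegionTooBusyException is the typical cause here
              if (attempt == 5) throw e;
              Thread.sleep(backoffMs);                       // give the in-flight flush time to complete
              backoffMs *= 2;
            }
          }
        }
      }
    }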
2024-11-25T17:09:44,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-25T17:09:44,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-25T17:09:44,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-25T17:09:44,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1190 sec 2024-11-25T17:09:44,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.1280 sec 2024-11-25T17:09:44,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:44,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:09:44,821 DEBUG [Thread-1559 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:56265 2024-11-25T17:09:44,821 DEBUG [Thread-1559 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:44,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:44,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:44,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:44,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:44,821 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:44,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:44,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125035eb2ebfe714cf7942850e5875d473b_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554584153/Put/seqid=0 2024-11-25T17:09:44,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742270_1446 (size=12454) 2024-11-25T17:09:45,235 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:45,239 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125035eb2ebfe714cf7942850e5875d473b_59409bf208d66df7ccc7026d9c7a73c4 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125035eb2ebfe714cf7942850e5875d473b_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:45,240 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/08dbd83b85464e07aaf408a1f87ef52e, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:45,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/08dbd83b85464e07aaf408a1f87ef52e is 175, key is test_row_0/A:col10/1732554584153/Put/seqid=0 2024-11-25T17:09:45,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742271_1447 (size=31255) 2024-11-25T17:09:45,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-25T17:09:45,580 INFO [Thread-1567 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-25T17:09:45,647 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=376, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/08dbd83b85464e07aaf408a1f87ef52e 2024-11-25T17:09:45,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/47fab3e532004df48ac483fff0700ee2 is 50, key is test_row_0/B:col10/1732554584153/Put/seqid=0 2024-11-25T17:09:45,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742272_1448 (size=12301) 2024-11-25T17:09:46,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/47fab3e532004df48ac483fff0700ee2 2024-11-25T17:09:46,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/34a9737d9bb640ee99447e9d9f01267d is 50, key is test_row_0/C:col10/1732554584153/Put/seqid=0 2024-11-25T17:09:46,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742273_1449 (size=12301) 2024-11-25T17:09:46,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=376 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/34a9737d9bb640ee99447e9d9f01267d 2024-11-25T17:09:46,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/08dbd83b85464e07aaf408a1f87ef52e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/08dbd83b85464e07aaf408a1f87ef52e 2024-11-25T17:09:46,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/08dbd83b85464e07aaf408a1f87ef52e, entries=150, sequenceid=376, filesize=30.5 K 2024-11-25T17:09:46,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/47fab3e532004df48ac483fff0700ee2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/47fab3e532004df48ac483fff0700ee2 2024-11-25T17:09:46,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/47fab3e532004df48ac483fff0700ee2, entries=150, sequenceid=376, filesize=12.0 K 2024-11-25T17:09:46,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/34a9737d9bb640ee99447e9d9f01267d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/34a9737d9bb640ee99447e9d9f01267d 2024-11-25T17:09:46,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/34a9737d9bb640ee99447e9d9f01267d, entries=150, sequenceid=376, filesize=12.0 K 2024-11-25T17:09:46,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 59409bf208d66df7ccc7026d9c7a73c4 in 1918ms, sequenceid=376, compaction requested=true 2024-11-25T17:09:46,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:46,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:46,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:46,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
59409bf208d66df7ccc7026d9c7a73c4:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:46,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:09:46,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59409bf208d66df7ccc7026d9c7a73c4:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:46,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-25T17:09:46,741 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:46,742 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:46,785 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:46,785 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/C is initiating minor compaction (all files) 2024-11-25T17:09:46,785 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/C in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:46,786 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6fea467f7b19475b8b5655d8c6a97b85, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f4381d54e52845c08533b6b4892f349b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/86e5862c462c48c099fd85b14117dd06, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/34a9737d9bb640ee99447e9d9f01267d] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=48.8 K 2024-11-25T17:09:46,787 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134464 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:46,787 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/A is initiating minor compaction (all files) 2024-11-25T17:09:46,787 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/A in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
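For reference, the FLUSH operation that completes above as procId 120 (the HBaseAdmin$TableFuture entry for default:TestAcidGuarantees) is the kind of request a client issues through the Admin API. A minimal sketch, assuming a standard HBase 2.x client and the table name from this run; on the master the request becomes the FlushTableProcedure / FlushRegionProcedure pair seen in the pid=120/pid=121 entries:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of TestAcidGuarantees and waits for
          // the master-side procedure to finish, as the test thread does above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The hbase shell equivalent is flush 'TestAcidGuarantees'.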
2024-11-25T17:09:46,787 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9cee2b8aca5741f4839c928f4850473a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f8dca733b6ac45b982bc70d171664951, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c8a7ac5f907840f9b03b43fa69bc80a0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/08dbd83b85464e07aaf408a1f87ef52e] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=131.3 K 2024-11-25T17:09:46,787 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:46,787 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9cee2b8aca5741f4839c928f4850473a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f8dca733b6ac45b982bc70d171664951, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c8a7ac5f907840f9b03b43fa69bc80a0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/08dbd83b85464e07aaf408a1f87ef52e] 2024-11-25T17:09:46,788 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fea467f7b19475b8b5655d8c6a97b85, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732554580694 2024-11-25T17:09:46,793 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cee2b8aca5741f4839c928f4850473a, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732554580694 2024-11-25T17:09:46,794 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4381d54e52845c08533b6b4892f349b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732554583293 2024-11-25T17:09:46,801 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f8dca733b6ac45b982bc70d171664951, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732554582917 2024-11-25T17:09:46,802 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 86e5862c462c48c099fd85b14117dd06, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732554583495 2024-11-25T17:09:46,806 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c8a7ac5f907840f9b03b43fa69bc80a0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732554583495 2024-11-25T17:09:46,809 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34a9737d9bb640ee99447e9d9f01267d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732554584153 2024-11-25T17:09:46,809 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 08dbd83b85464e07aaf408a1f87ef52e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732554584153 2024-11-25T17:09:46,852 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#C#compaction#376 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:46,852 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/06278c2a943448f39538f0e667d22d23 is 50, key is test_row_0/C:col10/1732554584153/Put/seqid=0 2024-11-25T17:09:46,857 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:46,860 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125cd9fbd73fde642b090997e06eb0b43ec_59409bf208d66df7ccc7026d9c7a73c4 store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:46,864 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125cd9fbd73fde642b090997e06eb0b43ec_59409bf208d66df7ccc7026d9c7a73c4, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:46,864 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125cd9fbd73fde642b090997e06eb0b43ec_59409bf208d66df7ccc7026d9c7a73c4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:46,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742274_1450 (size=13187) 2024-11-25T17:09:46,883 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/06278c2a943448f39538f0e667d22d23 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/06278c2a943448f39538f0e667d22d23 2024-11-25T17:09:46,890 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/C of 59409bf208d66df7ccc7026d9c7a73c4 into 06278c2a943448f39538f0e667d22d23(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:46,890 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:46,890 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/C, priority=12, startTime=1732554586740; duration=0sec 2024-11-25T17:09:46,890 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:46,890 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:C 2024-11-25T17:09:46,890 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:46,891 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:46,891 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 59409bf208d66df7ccc7026d9c7a73c4/B is initiating minor compaction (all files) 2024-11-25T17:09:46,892 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59409bf208d66df7ccc7026d9c7a73c4/B in TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
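The minor compactions above are scheduled by the region server itself once a store has enough flush files (four per store here, per the ExploringCompactionPolicy selections for A, B and C); no client call is involved. For completeness, a compaction can also be requested explicitly through the Admin API. A minimal sketch, assuming a standard HBase 2.x client, with the table and family names taken from this log; the server still picks the actual file set through its compaction policy:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction of the C family only.
          admin.compact(table, Bytes.toBytes("C"));
          // A full rewrite of every store in the table would be:
          // admin.majorCompact(table);
        }
      }
    }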
2024-11-25T17:09:46,892 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/71ab3b475d4f493590db7bc5778913d2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5cac7cc874a647ed819af6b9c03676ee, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fe20b44f1d284f50859902053a6ae01e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/47fab3e532004df48ac483fff0700ee2] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp, totalSize=48.8 K 2024-11-25T17:09:46,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742275_1451 (size=4469) 2024-11-25T17:09:46,894 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71ab3b475d4f493590db7bc5778913d2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732554580694 2024-11-25T17:09:46,894 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cac7cc874a647ed819af6b9c03676ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1732554583293 2024-11-25T17:09:46,894 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe20b44f1d284f50859902053a6ae01e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732554583495 2024-11-25T17:09:46,895 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47fab3e532004df48ac483fff0700ee2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732554584153 2024-11-25T17:09:46,906 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#B#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:46,906 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/fc095ab75324451eaf245bdca853eff6 is 50, key is test_row_0/B:col10/1732554584153/Put/seqid=0 2024-11-25T17:09:46,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742276_1452 (size=13187) 2024-11-25T17:09:46,928 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/fc095ab75324451eaf245bdca853eff6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fc095ab75324451eaf245bdca853eff6 2024-11-25T17:09:46,932 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/B of 59409bf208d66df7ccc7026d9c7a73c4 into fc095ab75324451eaf245bdca853eff6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:46,932 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:46,932 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/B, priority=12, startTime=1732554586740; duration=0sec 2024-11-25T17:09:46,932 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:46,932 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:B 2024-11-25T17:09:47,295 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59409bf208d66df7ccc7026d9c7a73c4#A#compaction#377 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:47,295 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/8d931e8bbcee4528a8e4b34a97fa53a5 is 175, key is test_row_0/A:col10/1732554584153/Put/seqid=0 2024-11-25T17:09:47,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742277_1453 (size=32141) 2024-11-25T17:09:47,423 DEBUG [Thread-1557 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:56265 2024-11-25T17:09:47,423 DEBUG [Thread-1557 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:47,434 DEBUG [Thread-1565 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:56265 2024-11-25T17:09:47,434 DEBUG [Thread-1565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:47,461 DEBUG [Thread-1561 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:56265 2024-11-25T17:09:47,461 DEBUG [Thread-1561 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:47,485 DEBUG [Thread-1563 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:56265 2024-11-25T17:09:47,485 DEBUG [Thread-1563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 208 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 27 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1724 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5172 rows 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1709 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5127 rows 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1717 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5151 rows 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1717 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5151 rows 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1724 2024-11-25T17:09:47,486 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5172 rows 2024-11-25T17:09:47,486 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-25T17:09:47,486 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68ad882f to 127.0.0.1:56265 2024-11-25T17:09:47,486 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:09:47,490 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-25T17:09:47,491 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-25T17:09:47,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:47,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-25T17:09:47,497 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554587497"}]},"ts":"1732554587497"} 2024-11-25T17:09:47,501 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-25T17:09:47,507 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-25T17:09:47,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-25T17:09:47,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, UNASSIGN}] 2024-11-25T17:09:47,509 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, UNASSIGN 2024-11-25T17:09:47,510 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:47,511 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:09:47,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:09:47,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-25T17:09:47,662 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:47,663 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:47,663 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:09:47,663 DEBUG 
[RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing 59409bf208d66df7ccc7026d9c7a73c4, disabling compactions & flushes 2024-11-25T17:09:47,663 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1942): waiting for 1 compactions to complete for region TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:47,703 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/8d931e8bbcee4528a8e4b34a97fa53a5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/8d931e8bbcee4528a8e4b34a97fa53a5 2024-11-25T17:09:47,708 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59409bf208d66df7ccc7026d9c7a73c4/A of 59409bf208d66df7ccc7026d9c7a73c4 into 8d931e8bbcee4528a8e4b34a97fa53a5(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:47,708 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:47,708 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4., storeName=59409bf208d66df7ccc7026d9c7a73c4/A, priority=12, startTime=1732554586739; duration=0sec 2024-11-25T17:09:47,708 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:47,708 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. after waiting 0 ms 2024-11-25T17:09:47,708 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59409bf208d66df7ccc7026d9c7a73c4:A 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 
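The teardown that begins above (Client=jenkins disable TestAcidGuarantees, DisableTableProcedure pid=122, then the region close with its final flush) is driven by an ordinary Admin call at the end of the run. A minimal sketch, assuming a standard HBase 2.x client; the deleteTable step is only the typical follow-up and is not shown in this excerpt:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (!admin.isTableDisabled(table)) {
            // The master runs a DisableTableProcedure (pid=122 above): each region
            // is unassigned and its memstore flushed before the close completes.
            admin.disableTable(table);
          }
          // Typical follow-up once the table is disabled (not shown in this excerpt):
          // admin.deleteTable(table);
        }
      }
    }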
2024-11-25T17:09:47,708 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing 59409bf208d66df7ccc7026d9c7a73c4 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=A 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=B 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59409bf208d66df7ccc7026d9c7a73c4, store=C 2024-11-25T17:09:47,708 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:47,714 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411251104dd92839a42e3910e96214aad9560_59409bf208d66df7ccc7026d9c7a73c4 is 50, key is test_row_0/A:col10/1732554587484/Put/seqid=0 2024-11-25T17:09:47,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742278_1454 (size=12454) 2024-11-25T17:09:47,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-25T17:09:48,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-25T17:09:48,118 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:48,122 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411251104dd92839a42e3910e96214aad9560_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411251104dd92839a42e3910e96214aad9560_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:48,123 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/930a705467a24f0e9e0375a009b120ac, store: [table=TestAcidGuarantees family=A region=59409bf208d66df7ccc7026d9c7a73c4] 2024-11-25T17:09:48,123 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/930a705467a24f0e9e0375a009b120ac is 175, key is test_row_0/A:col10/1732554587484/Put/seqid=0 2024-11-25T17:09:48,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742279_1455 (size=31255) 2024-11-25T17:09:48,530 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=386, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/930a705467a24f0e9e0375a009b120ac 2024-11-25T17:09:48,537 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/e4dba598114a4d26a8ac633a64aa024b is 50, key is test_row_0/B:col10/1732554587484/Put/seqid=0 2024-11-25T17:09:48,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742280_1456 (size=12301) 2024-11-25T17:09:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-25T17:09:48,943 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/e4dba598114a4d26a8ac633a64aa024b 2024-11-25T17:09:48,950 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/09c7a3c58c254bb58e2f8e637a8a47db is 50, key is test_row_0/C:col10/1732554587484/Put/seqid=0 2024-11-25T17:09:48,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742281_1457 (size=12301) 2024-11-25T17:09:49,354 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/09c7a3c58c254bb58e2f8e637a8a47db 2024-11-25T17:09:49,357 DEBUG 
[RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/A/930a705467a24f0e9e0375a009b120ac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/930a705467a24f0e9e0375a009b120ac 2024-11-25T17:09:49,360 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/930a705467a24f0e9e0375a009b120ac, entries=150, sequenceid=386, filesize=30.5 K 2024-11-25T17:09:49,361 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/B/e4dba598114a4d26a8ac633a64aa024b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/e4dba598114a4d26a8ac633a64aa024b 2024-11-25T17:09:49,364 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/e4dba598114a4d26a8ac633a64aa024b, entries=150, sequenceid=386, filesize=12.0 K 2024-11-25T17:09:49,365 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/.tmp/C/09c7a3c58c254bb58e2f8e637a8a47db as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/09c7a3c58c254bb58e2f8e637a8a47db 2024-11-25T17:09:49,368 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/09c7a3c58c254bb58e2f8e637a8a47db, entries=150, sequenceid=386, filesize=12.0 K 2024-11-25T17:09:49,368 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 59409bf208d66df7ccc7026d9c7a73c4 in 1660ms, sequenceid=386, compaction requested=false 2024-11-25T17:09:49,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/2c6a07e6d8734ece8ff614937b946d24, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0c9eb5123683424d97c4dd8c85700a7b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/45583ed1941c4038a4f91629b50f850a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f3a5b2944ba04d599f9a82125787c015, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0a083c31fc974df593ddfc4b6a99a661, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/531c6c3dc8564ff6a766c02ab8a51616, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/ab453e730ba7465e816660925fc2848b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/487c93c85dee41efaaa27c1e9608d4bd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/661a1b1043d14f0488af6b7d9f5dddd2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9c01e8566bb24e8495d1e82e5fe641b9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/7f229fb42e9b4a81b80ba3ccb25b3917, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/efd79ada91ae4bc09ff694952f05cea2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/bbf0ff700a1f439db85103f8a47ead42, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c43c93e0e848487ea4482e3c8a52e636, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/152a18f246ba481ab7ee82b72a514719, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/779d5da1abcd4809baab1fd2a804a9fc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e10a01105cf74f2fbc6223e6f7030882, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/3ceac19326d142b1b67e321b82164f7c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e0eb252f784144779166836fffeb214c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/117f7fac2b364cdb8d0099cdc631aad1, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/251a5609ee9a4d6489e543940d0a6fba, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9cee2b8aca5741f4839c928f4850473a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/6c7b1e16eb354d108cda3068be74db51, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f8dca733b6ac45b982bc70d171664951, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c8a7ac5f907840f9b03b43fa69bc80a0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/08dbd83b85464e07aaf408a1f87ef52e] to archive 2024-11-25T17:09:49,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:09:49,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/2c6a07e6d8734ece8ff614937b946d24 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/2c6a07e6d8734ece8ff614937b946d24 2024-11-25T17:09:49,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0c9eb5123683424d97c4dd8c85700a7b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0c9eb5123683424d97c4dd8c85700a7b 2024-11-25T17:09:49,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/45583ed1941c4038a4f91629b50f850a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/45583ed1941c4038a4f91629b50f850a 2024-11-25T17:09:49,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f3a5b2944ba04d599f9a82125787c015 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f3a5b2944ba04d599f9a82125787c015 2024-11-25T17:09:49,374 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0a083c31fc974df593ddfc4b6a99a661 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/0a083c31fc974df593ddfc4b6a99a661 2024-11-25T17:09:49,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/531c6c3dc8564ff6a766c02ab8a51616 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/531c6c3dc8564ff6a766c02ab8a51616 2024-11-25T17:09:49,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/ab453e730ba7465e816660925fc2848b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/ab453e730ba7465e816660925fc2848b 2024-11-25T17:09:49,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/487c93c85dee41efaaa27c1e9608d4bd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/487c93c85dee41efaaa27c1e9608d4bd 2024-11-25T17:09:49,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/661a1b1043d14f0488af6b7d9f5dddd2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/661a1b1043d14f0488af6b7d9f5dddd2 2024-11-25T17:09:49,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9c01e8566bb24e8495d1e82e5fe641b9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9c01e8566bb24e8495d1e82e5fe641b9 2024-11-25T17:09:49,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/7f229fb42e9b4a81b80ba3ccb25b3917 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/7f229fb42e9b4a81b80ba3ccb25b3917 2024-11-25T17:09:49,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/efd79ada91ae4bc09ff694952f05cea2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/efd79ada91ae4bc09ff694952f05cea2 2024-11-25T17:09:49,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/bbf0ff700a1f439db85103f8a47ead42 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/bbf0ff700a1f439db85103f8a47ead42 2024-11-25T17:09:49,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c43c93e0e848487ea4482e3c8a52e636 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c43c93e0e848487ea4482e3c8a52e636 2024-11-25T17:09:49,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/152a18f246ba481ab7ee82b72a514719 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/152a18f246ba481ab7ee82b72a514719 2024-11-25T17:09:49,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/779d5da1abcd4809baab1fd2a804a9fc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/779d5da1abcd4809baab1fd2a804a9fc 2024-11-25T17:09:49,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e10a01105cf74f2fbc6223e6f7030882 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e10a01105cf74f2fbc6223e6f7030882 2024-11-25T17:09:49,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/3ceac19326d142b1b67e321b82164f7c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/3ceac19326d142b1b67e321b82164f7c 2024-11-25T17:09:49,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e0eb252f784144779166836fffeb214c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/e0eb252f784144779166836fffeb214c 2024-11-25T17:09:49,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/117f7fac2b364cdb8d0099cdc631aad1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/117f7fac2b364cdb8d0099cdc631aad1 2024-11-25T17:09:49,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/251a5609ee9a4d6489e543940d0a6fba to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/251a5609ee9a4d6489e543940d0a6fba 2024-11-25T17:09:49,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9cee2b8aca5741f4839c928f4850473a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/9cee2b8aca5741f4839c928f4850473a 2024-11-25T17:09:49,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/6c7b1e16eb354d108cda3068be74db51 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/6c7b1e16eb354d108cda3068be74db51 2024-11-25T17:09:49,390 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f8dca733b6ac45b982bc70d171664951 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/f8dca733b6ac45b982bc70d171664951 2024-11-25T17:09:49,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c8a7ac5f907840f9b03b43fa69bc80a0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/c8a7ac5f907840f9b03b43fa69bc80a0 2024-11-25T17:09:49,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/08dbd83b85464e07aaf408a1f87ef52e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/08dbd83b85464e07aaf408a1f87ef52e 2024-11-25T17:09:49,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/41047f472e724b388321ed62748895e4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/bf57c70851af47e4bc3b9b3048a0ed2f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/cd4b080922df40eca9f0ddebbf5dc375, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5891127b8e654972ba532afd36a39340, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/8519a0d6e9164fedad891329d582b6e1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/643fb7d9d56945dbaa1924634a802297, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c28b809db06d438186f3847c42057821, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c106c25eda3d4e7f9011d69fc72d88fe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/9fd9ac629f274f7e9d0b4476b7c0d11c, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/719fa184e58845d78137ac4da028759b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5f20c58a86d2470aac39fc963d8a41c5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/2b16c313fb7044bfacf8ea0445b078b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/6277a4d5387643dabfb3cdaeca5eb085, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/70c58799943745fa9a6c4a16a87fbced, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/4dc9778366ab48888e3b222131287426, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/1f829f2810a849b7a7d2e96cbe09a4b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b6cdd7cd44b34a02b259b72a1a46fcd9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/7c2ff4994f5042628ef6e2d3103b5fc4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/3ff6c418b8b145a5b1dd21da7fd5dbfd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c1817c98b42440ba93d79cab406fac50, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b28ab00182314b54a3e2a11a1a5ac1b2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/71ab3b475d4f493590db7bc5778913d2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/f8d80087baa04a48b4a622a8ef4be713, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5cac7cc874a647ed819af6b9c03676ee, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fe20b44f1d284f50859902053a6ae01e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/47fab3e532004df48ac483fff0700ee2] to archive 2024-11-25T17:09:49,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
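[editor's note] The entries above show the store-closer handing the B-family compaction leftovers to HFileArchiver, which relocates each file from its place under data/ to the same relative path under archive/ before the region finishes closing. The snippet below is only a hypothetical helper illustrating that path mapping as it appears in these log lines; it is not HBase's HFileArchiver implementation, and the sample paths are taken verbatim from the log.

    // Hypothetical sketch of the data/ -> archive/ path rewrite visible in the
    // "Archived from FileableStoreFile, <src> to <dst>" lines above.
    public final class ArchivePathSketch {
        /** Maps <root>/data/<table-relative-path> to <root>/archive/data/<table-relative-path>. */
        static String toArchivePath(String rootDir, String storeFilePath) {
            String dataPrefix = rootDir + "/data/";
            if (!storeFilePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("store file is not under " + dataPrefix);
            }
            return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4";
            String src = root + "/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/41047f472e724b388321ed62748895e4";
            // Prints the same archive location the HFileArchiver log line reports for this file.
            System.out.println(toArchivePath(root, src));
        }
    }

[end editor's note]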
2024-11-25T17:09:49,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/41047f472e724b388321ed62748895e4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/41047f472e724b388321ed62748895e4 2024-11-25T17:09:49,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/bf57c70851af47e4bc3b9b3048a0ed2f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/bf57c70851af47e4bc3b9b3048a0ed2f 2024-11-25T17:09:49,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/cd4b080922df40eca9f0ddebbf5dc375 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/cd4b080922df40eca9f0ddebbf5dc375 2024-11-25T17:09:49,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5891127b8e654972ba532afd36a39340 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5891127b8e654972ba532afd36a39340 2024-11-25T17:09:49,397 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/8519a0d6e9164fedad891329d582b6e1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/8519a0d6e9164fedad891329d582b6e1 2024-11-25T17:09:49,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/643fb7d9d56945dbaa1924634a802297 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/643fb7d9d56945dbaa1924634a802297 2024-11-25T17:09:49,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c28b809db06d438186f3847c42057821 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c28b809db06d438186f3847c42057821 2024-11-25T17:09:49,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c106c25eda3d4e7f9011d69fc72d88fe to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c106c25eda3d4e7f9011d69fc72d88fe 2024-11-25T17:09:49,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/9fd9ac629f274f7e9d0b4476b7c0d11c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/9fd9ac629f274f7e9d0b4476b7c0d11c 2024-11-25T17:09:49,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/719fa184e58845d78137ac4da028759b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/719fa184e58845d78137ac4da028759b 2024-11-25T17:09:49,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5f20c58a86d2470aac39fc963d8a41c5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5f20c58a86d2470aac39fc963d8a41c5 2024-11-25T17:09:49,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/2b16c313fb7044bfacf8ea0445b078b7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/2b16c313fb7044bfacf8ea0445b078b7 2024-11-25T17:09:49,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/6277a4d5387643dabfb3cdaeca5eb085 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/6277a4d5387643dabfb3cdaeca5eb085 2024-11-25T17:09:49,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/70c58799943745fa9a6c4a16a87fbced to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/70c58799943745fa9a6c4a16a87fbced 2024-11-25T17:09:49,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/4dc9778366ab48888e3b222131287426 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/4dc9778366ab48888e3b222131287426 2024-11-25T17:09:49,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/1f829f2810a849b7a7d2e96cbe09a4b7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/1f829f2810a849b7a7d2e96cbe09a4b7 2024-11-25T17:09:49,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b6cdd7cd44b34a02b259b72a1a46fcd9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b6cdd7cd44b34a02b259b72a1a46fcd9 2024-11-25T17:09:49,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/7c2ff4994f5042628ef6e2d3103b5fc4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/7c2ff4994f5042628ef6e2d3103b5fc4 2024-11-25T17:09:49,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/3ff6c418b8b145a5b1dd21da7fd5dbfd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/3ff6c418b8b145a5b1dd21da7fd5dbfd 2024-11-25T17:09:49,410 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c1817c98b42440ba93d79cab406fac50 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/c1817c98b42440ba93d79cab406fac50 2024-11-25T17:09:49,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b28ab00182314b54a3e2a11a1a5ac1b2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/b28ab00182314b54a3e2a11a1a5ac1b2 2024-11-25T17:09:49,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/71ab3b475d4f493590db7bc5778913d2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/71ab3b475d4f493590db7bc5778913d2 2024-11-25T17:09:49,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/f8d80087baa04a48b4a622a8ef4be713 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/f8d80087baa04a48b4a622a8ef4be713 2024-11-25T17:09:49,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5cac7cc874a647ed819af6b9c03676ee to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/5cac7cc874a647ed819af6b9c03676ee 2024-11-25T17:09:49,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fe20b44f1d284f50859902053a6ae01e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fe20b44f1d284f50859902053a6ae01e 2024-11-25T17:09:49,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/47fab3e532004df48ac483fff0700ee2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/47fab3e532004df48ac483fff0700ee2 2024-11-25T17:09:49,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f1c6084ef7044e1c891c030e9d5a9112, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c2bfdc43c73a42a593bbd90de74c94d7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/cd618b4f25434ffbac8d04fdd236b451, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fb1dcb401e24d1691aa08adaf122f2d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0affec2130284068a2c9563c2d369224, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/da37ba0f91364b48aa54173e08d03cdc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c5025ecf0cd3471ba6a9a69da173b54c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/e851cf20085c41d2b0e9884a0d22e84e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/4a194a3c57334217950ec5b0af9ecf9f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/464ca64464d24141b3f9cf6fb7cb3307, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c6d89b7244e74186b69d9bcd0a9aa795, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/2066f5bd87dd47fa805e46f6e11c4623, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/31c464916e40444ab3ccd873287ae503, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/64797956b02647d19bde72768286ef14, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c01ec0a6fa0e4c068585173295326bad, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0f59bf84ee36460b8f87eb1bdfcb82af, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/16b0c30691ce41719cad65aad94ebeec, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6e2e7cb744854b18bed2c42131f46af2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/81a44d96b2924d97b2a32413bbe43b52, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fbadeab4ec54b3d9386c3b8901f97e6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f8f90bff865f42dd819ea03ff6876df6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6fea467f7b19475b8b5655d8c6a97b85, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/beac34c654094c129a999a907fc9e7cb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f4381d54e52845c08533b6b4892f349b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/86e5862c462c48c099fd85b14117dd06, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/34a9737d9bb640ee99447e9d9f01267d] to archive 2024-11-25T17:09:49,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
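[editor's note] After the C-family files listed above are archived, the entries further below show the test client completing a DISABLE of TestAcidGuarantees (DisableTableProcedure, pid=122) and then requesting its deletion (DeleteTableProcedure, pid=126), at which point the remaining region and mob directories are archived as well. The fragment below is a minimal sketch of the public Admin calls that correspond to those procedures; the actual test drives this through its own harness, so the class name and configuration setup here are placeholders, not the test's code.

    // Minimal sketch (assumed standalone client context) of the disable + delete
    // sequence reflected in the DisableTableProcedure / DeleteTableProcedure entries below.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TeardownSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);                   // master runs a DisableTableProcedure
                }
                admin.deleteTable(table);                        // master runs a DeleteTableProcedure; region dirs are archived
            }
        }
    }

[end editor's note]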
2024-11-25T17:09:49,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f1c6084ef7044e1c891c030e9d5a9112 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f1c6084ef7044e1c891c030e9d5a9112 2024-11-25T17:09:49,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c2bfdc43c73a42a593bbd90de74c94d7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c2bfdc43c73a42a593bbd90de74c94d7 2024-11-25T17:09:49,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/cd618b4f25434ffbac8d04fdd236b451 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/cd618b4f25434ffbac8d04fdd236b451 2024-11-25T17:09:49,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fb1dcb401e24d1691aa08adaf122f2d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fb1dcb401e24d1691aa08adaf122f2d 2024-11-25T17:09:49,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0affec2130284068a2c9563c2d369224 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0affec2130284068a2c9563c2d369224 2024-11-25T17:09:49,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/da37ba0f91364b48aa54173e08d03cdc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/da37ba0f91364b48aa54173e08d03cdc 2024-11-25T17:09:49,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c5025ecf0cd3471ba6a9a69da173b54c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c5025ecf0cd3471ba6a9a69da173b54c 2024-11-25T17:09:49,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/e851cf20085c41d2b0e9884a0d22e84e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/e851cf20085c41d2b0e9884a0d22e84e 2024-11-25T17:09:49,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/4a194a3c57334217950ec5b0af9ecf9f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/4a194a3c57334217950ec5b0af9ecf9f 2024-11-25T17:09:49,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/464ca64464d24141b3f9cf6fb7cb3307 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/464ca64464d24141b3f9cf6fb7cb3307 2024-11-25T17:09:49,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c6d89b7244e74186b69d9bcd0a9aa795 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c6d89b7244e74186b69d9bcd0a9aa795 2024-11-25T17:09:49,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/2066f5bd87dd47fa805e46f6e11c4623 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/2066f5bd87dd47fa805e46f6e11c4623 2024-11-25T17:09:49,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/31c464916e40444ab3ccd873287ae503 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/31c464916e40444ab3ccd873287ae503 2024-11-25T17:09:49,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/64797956b02647d19bde72768286ef14 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/64797956b02647d19bde72768286ef14 2024-11-25T17:09:49,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c01ec0a6fa0e4c068585173295326bad to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/c01ec0a6fa0e4c068585173295326bad 2024-11-25T17:09:49,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0f59bf84ee36460b8f87eb1bdfcb82af to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/0f59bf84ee36460b8f87eb1bdfcb82af 2024-11-25T17:09:49,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/16b0c30691ce41719cad65aad94ebeec to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/16b0c30691ce41719cad65aad94ebeec 2024-11-25T17:09:49,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6e2e7cb744854b18bed2c42131f46af2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6e2e7cb744854b18bed2c42131f46af2 2024-11-25T17:09:49,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/81a44d96b2924d97b2a32413bbe43b52 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/81a44d96b2924d97b2a32413bbe43b52 2024-11-25T17:09:49,433 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fbadeab4ec54b3d9386c3b8901f97e6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/5fbadeab4ec54b3d9386c3b8901f97e6 2024-11-25T17:09:49,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f8f90bff865f42dd819ea03ff6876df6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f8f90bff865f42dd819ea03ff6876df6 2024-11-25T17:09:49,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6fea467f7b19475b8b5655d8c6a97b85 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/6fea467f7b19475b8b5655d8c6a97b85 2024-11-25T17:09:49,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/beac34c654094c129a999a907fc9e7cb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/beac34c654094c129a999a907fc9e7cb 2024-11-25T17:09:49,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f4381d54e52845c08533b6b4892f349b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/f4381d54e52845c08533b6b4892f349b 2024-11-25T17:09:49,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/86e5862c462c48c099fd85b14117dd06 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/86e5862c462c48c099fd85b14117dd06 2024-11-25T17:09:49,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/34a9737d9bb640ee99447e9d9f01267d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/34a9737d9bb640ee99447e9d9f01267d 2024-11-25T17:09:49,440 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/recovered.edits/389.seqid, newMaxSeqId=389, maxSeqId=4 2024-11-25T17:09:49,441 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4. 2024-11-25T17:09:49,441 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for 59409bf208d66df7ccc7026d9c7a73c4: 2024-11-25T17:09:49,442 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed 59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,442 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=59409bf208d66df7ccc7026d9c7a73c4, regionState=CLOSED 2024-11-25T17:09:49,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-25T17:09:49,444 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure 59409bf208d66df7ccc7026d9c7a73c4, server=6579369734b6,41865,1732554474464 in 1.9320 sec 2024-11-25T17:09:49,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-25T17:09:49,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=59409bf208d66df7ccc7026d9c7a73c4, UNASSIGN in 1.9360 sec 2024-11-25T17:09:49,446 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-25T17:09:49,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9390 sec 2024-11-25T17:09:49,447 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554589447"}]},"ts":"1732554589447"} 2024-11-25T17:09:49,448 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-25T17:09:49,450 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-25T17:09:49,451 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9590 sec 2024-11-25T17:09:49,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-25T17:09:49,598 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-25T17:09:49,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-25T17:09:49,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:49,600 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-25T17:09:49,600 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:49,602 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,604 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/recovered.edits] 2024-11-25T17:09:49,606 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/8d931e8bbcee4528a8e4b34a97fa53a5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/8d931e8bbcee4528a8e4b34a97fa53a5 2024-11-25T17:09:49,607 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/930a705467a24f0e9e0375a009b120ac to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/A/930a705467a24f0e9e0375a009b120ac 2024-11-25T17:09:49,612 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/e4dba598114a4d26a8ac633a64aa024b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/e4dba598114a4d26a8ac633a64aa024b 2024-11-25T17:09:49,614 
DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fc095ab75324451eaf245bdca853eff6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/B/fc095ab75324451eaf245bdca853eff6 2024-11-25T17:09:49,616 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/06278c2a943448f39538f0e667d22d23 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/06278c2a943448f39538f0e667d22d23 2024-11-25T17:09:49,617 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/09c7a3c58c254bb58e2f8e637a8a47db to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/C/09c7a3c58c254bb58e2f8e637a8a47db 2024-11-25T17:09:49,619 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/recovered.edits/389.seqid to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4/recovered.edits/389.seqid 2024-11-25T17:09:49,620 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,620 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-25T17:09:49,620 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-25T17:09:49,621 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-25T17:09:49,623 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112502b07f1c31304be8a257689ac9232c60_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112502b07f1c31304be8a257689ac9232c60_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,624 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125035eb2ebfe714cf7942850e5875d473b_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125035eb2ebfe714cf7942850e5875d473b_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,625 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125036d01c6f5c24df5aeb7c1d291c030c9_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125036d01c6f5c24df5aeb7c1d291c030c9_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,626 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411251104dd92839a42e3910e96214aad9560_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411251104dd92839a42e3910e96214aad9560_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,627 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125208475924862428db19b93aa1d35b365_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125208475924862428db19b93aa1d35b365_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,629 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112529f6b22bd76444d4916b89efcd0f4e74_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112529f6b22bd76444d4916b89efcd0f4e74_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,630 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112532363a3659a146deaad937abbdb08a00_59409bf208d66df7ccc7026d9c7a73c4 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112532363a3659a146deaad937abbdb08a00_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,631 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411253fe10e8fbdf8475990f43dfdb84c44bc_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411253fe10e8fbdf8475990f43dfdb84c44bc_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,631 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411254364b71778dc492ab85123e8a4a55e11_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411254364b71778dc492ab85123e8a4a55e11_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,632 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411256d9c1409b12e4e4b861f117e1fa05ead_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411256d9c1409b12e4e4b861f117e1fa05ead_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,633 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411257f706515bf534a819c9dd85ee2c6c368_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411257f706515bf534a819c9dd85ee2c6c368_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,634 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258a156af6e1bd4aaeba2c24ed151106af_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258a156af6e1bd4aaeba2c24ed151106af_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,635 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411259676fdd531e842b98bbbd19aef197154_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411259676fdd531e842b98bbbd19aef197154_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,637 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411259b160196c880479fb735d705efa3a64f_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411259b160196c880479fb735d705efa3a64f_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,638 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ab909632f6bc44358218679b2b1459f5_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ab909632f6bc44358218679b2b1459f5_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,639 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ac3f471ce852457fa349bd68d9008b40_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ac3f471ce852457fa349bd68d9008b40_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,640 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b07c25e0897b4821b03886449379c54a_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b07c25e0897b4821b03886449379c54a_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,641 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125bb4ecca4135b4d0195fbfdae46baf3ce_59409bf208d66df7ccc7026d9c7a73c4 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125bb4ecca4135b4d0195fbfdae46baf3ce_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,642 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e848a5a0ba834ae6af5662b5e1cb786c_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e848a5a0ba834ae6af5662b5e1cb786c_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,643 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ea1c7bb8f798448280cd453683744753_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ea1c7bb8f798448280cd453683744753_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,644 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f28e6c09482647ebbe1774cb84574365_59409bf208d66df7ccc7026d9c7a73c4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f28e6c09482647ebbe1774cb84574365_59409bf208d66df7ccc7026d9c7a73c4 2024-11-25T17:09:49,644 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-25T17:09:49,646 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:49,648 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-25T17:09:49,650 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-25T17:09:49,651 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:49,651 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-25T17:09:49,651 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732554589651"}]},"ts":"9223372036854775807"} 2024-11-25T17:09:49,652 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-25T17:09:49,652 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 59409bf208d66df7ccc7026d9c7a73c4, NAME => 'TestAcidGuarantees,,1732554561380.59409bf208d66df7ccc7026d9c7a73c4.', STARTKEY => '', ENDKEY => ''}] 2024-11-25T17:09:49,652 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-25T17:09:49,652 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732554589652"}]},"ts":"9223372036854775807"} 2024-11-25T17:09:49,654 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-25T17:09:49,659 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:49,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 60 msec 2024-11-25T17:09:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-25T17:09:49,701 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-25T17:09:49,713 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 239) - Thread LEAK? -, OpenFileDescriptor=463 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=691 (was 708), ProcessCount=11 (was 11), AvailableMemoryMB=3341 (was 2231) - AvailableMemoryMB LEAK? - 2024-11-25T17:09:49,727 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=463, MaxFileDescriptor=1048576, SystemLoadAverage=691, ProcessCount=11, AvailableMemoryMB=3340 2024-11-25T17:09:49,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
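The DISABLE (procId 122) and DELETE (procId 126) operations that the client reports as completed above correspond to a routine disable-and-drop sequence against the HBase Admin API. A minimal sketch of that sequence, assuming a stock HBase 2.x client and a Configuration that already points at this cluster; the class name and connection setup are illustrative and not taken from the test code itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath to locate ZooKeeper and the master.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(table)) {
            // A table must be disabled before it can be deleted; each call blocks
            // until the corresponding master procedure (as logged above) completes.
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);
            }
            admin.deleteTable(table);
          }
        }
      }
    }

Each Admin call here surfaces on the master side as the DisableTableProcedure and DeleteTableProcedure visible in the preceding log entries, including the region close, HFile archiving, and hbase:meta cleanup steps.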
2024-11-25T17:09:49,729 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:09:49,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-25T17:09:49,732 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T17:09:49,733 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:49,733 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-11-25T17:09:49,733 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T17:09:49,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-25T17:09:49,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742282_1458 (size=963) 2024-11-25T17:09:49,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-25T17:09:50,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-25T17:09:50,144 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:09:50,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742283_1459 (size=53) 2024-11-25T17:09:50,152 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:09:50,152 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3c0802cb7cf476d143cab96601b733ab, disabling compactions & flushes 2024-11-25T17:09:50,152 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:50,152 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:50,152 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. after waiting 0 ms 2024-11-25T17:09:50,152 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:50,152 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:50,152 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:50,153 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T17:09:50,153 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732554590153"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732554590153"}]},"ts":"1732554590153"} 2024-11-25T17:09:50,162 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
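The CREATE request above (procId 127) carries the table-level metadata key 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three column families A, B and C with VERSIONS => '1'; the remaining per-family attributes printed in the descriptor appear to be HBase defaults. A minimal client-side sketch that would produce an equivalent descriptor, assuming a standard HBase 2.x client; the small MEMSTORE_FLUSHSIZE the test also uses (flagged by the TableDescriptorChecker warning earlier) is deliberately omitted here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table-level metadata seen in the log: ADAPTIVE in-memory compaction.
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)  // VERSIONS => '1' in the descriptor above
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }

The ADAPTIVE setting is what later makes each store open with memstore type=CompactingMemStore and compactor=ADAPTIVE, as the StoreOpener lines for families A, B and C show further down.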
2024-11-25T17:09:50,163 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T17:09:50,163 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554590163"}]},"ts":"1732554590163"} 2024-11-25T17:09:50,165 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-25T17:09:50,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c0802cb7cf476d143cab96601b733ab, ASSIGN}] 2024-11-25T17:09:50,172 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c0802cb7cf476d143cab96601b733ab, ASSIGN 2024-11-25T17:09:50,172 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c0802cb7cf476d143cab96601b733ab, ASSIGN; state=OFFLINE, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=false 2024-11-25T17:09:50,325 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=3c0802cb7cf476d143cab96601b733ab, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:50,334 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure 3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:09:50,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-25T17:09:50,493 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:50,495 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:09:50,496 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:09:50,496 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,496 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:09:50,496 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,496 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,497 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,498 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:50,498 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c0802cb7cf476d143cab96601b733ab columnFamilyName A 2024-11-25T17:09:50,498 DEBUG [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:50,499 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.HStore(327): Store=3c0802cb7cf476d143cab96601b733ab/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:50,499 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,500 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:50,500 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c0802cb7cf476d143cab96601b733ab columnFamilyName B 2024-11-25T17:09:50,500 DEBUG [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:50,501 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.HStore(327): Store=3c0802cb7cf476d143cab96601b733ab/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:50,501 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,502 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:09:50,502 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3c0802cb7cf476d143cab96601b733ab columnFamilyName C 2024-11-25T17:09:50,502 DEBUG [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:09:50,503 INFO [StoreOpener-3c0802cb7cf476d143cab96601b733ab-1 {}] regionserver.HStore(327): Store=3c0802cb7cf476d143cab96601b733ab/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:09:50,503 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:50,504 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,504 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,505 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:09:50,506 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,508 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:09:50,508 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened 3c0802cb7cf476d143cab96601b733ab; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64274805, jitterRate=-0.042230769991874695}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:09:50,509 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:50,510 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., pid=129, masterSystemTime=1732554590492 2024-11-25T17:09:50,512 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:50,512 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
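The FlushLargeStoresPolicy entry above notes that 'hbase.hregion.percolumnfamilyflush.size.lower.bound' is not set in the table descriptor, so the region falls back to the memstore flush heap size divided by the number of families (16.0 M here). If a test or operator wanted to pin that bound explicitly, the log indicates it is read from the table descriptor; a small, self-contained sketch of setting it on a descriptor builder, with the 16 MB figure simply mirroring the fallback the log reports:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {
      // Illustrative helper: returns a builder with the per-family flush lower bound
      // pinned, instead of relying on the flushSize / numFamilies fallback logged above.
      static TableDescriptorBuilder withFlushLowerBound(TableName table) {
        return TableDescriptorBuilder.newBuilder(table)
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024));
      }
    }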
2024-11-25T17:09:50,512 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=3c0802cb7cf476d143cab96601b733ab, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:09:50,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-25T17:09:50,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure 3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 in 179 msec 2024-11-25T17:09:50,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-25T17:09:50,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c0802cb7cf476d143cab96601b733ab, ASSIGN in 344 msec 2024-11-25T17:09:50,516 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T17:09:50,516 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554590516"}]},"ts":"1732554590516"} 2024-11-25T17:09:50,517 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-25T17:09:50,520 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T17:09:50,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 791 msec 2024-11-25T17:09:50,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-25T17:09:50,842 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-11-25T17:09:50,844 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cb726fe to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59bd764a 2024-11-25T17:09:50,847 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238db126, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,848 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,850 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50800, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,851 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T17:09:50,851 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42132, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T17:09:50,853 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x301741f1 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22a6e9f 2024-11-25T17:09:50,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c60eb7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,857 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63cefe40 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32c12a30 2024-11-25T17:09:50,859 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79b10416, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,860 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65df2359 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5ef40578 2024-11-25T17:09:50,862 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f142b04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,863 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d0ab200 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32bb71c 2024-11-25T17:09:50,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de9f076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,866 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5871c039 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc0f7c 2024-11-25T17:09:50,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4414259d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,869 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-11-25T17:09:50,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,875 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bc486e1 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11193a0c 2024-11-25T17:09:50,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d672ed2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,878 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2070263a to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7861b162 2024-11-25T17:09:50,883 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf40102, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,884 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6050584c to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@154f0f85 2024-11-25T17:09:50,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496fe03f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,888 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dd48863 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a917b 2024-11-25T17:09:50,892 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3652e74d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:09:50,900 DEBUG [hconnection-0x6fc3b276-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,902 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,903 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:50,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-25T17:09:50,905 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:50,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-25T17:09:50,905 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:50,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:50,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:50,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:50,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:50,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:50,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:50,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:50,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:50,917 DEBUG [hconnection-0x5a026a82-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,918 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,921 DEBUG [hconnection-0x4c97fd38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,922 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,925 DEBUG [hconnection-0x536c23fb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,926 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,927 DEBUG [hconnection-0x6c2f3258-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,927 DEBUG [hconnection-0x58a5ec77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,927 DEBUG 
[hconnection-0x6f6059ab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,928 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,928 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50856, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,928 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50846, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:50,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554650944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:50,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:50,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554650944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:50,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/03ecdb590d0b4addbf9a3ccb895efa82 is 50, key is test_row_0/A:col10/1732554590914/Put/seqid=0 2024-11-25T17:09:50,948 DEBUG [hconnection-0x4746177f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,948 DEBUG [hconnection-0x6c2c22a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,949 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50862, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,949 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50878, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,950 DEBUG [hconnection-0x147a5b94-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:09:50,951 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:09:50,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554650951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:50,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554650952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:50,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554650952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:50,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742284_1460 (size=12001) 2024-11-25T17:09:50,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/03ecdb590d0b4addbf9a3ccb895efa82 2024-11-25T17:09:50,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/dbeeef6d81424082b34007799d85f2a4 is 50, key is test_row_0/B:col10/1732554590914/Put/seqid=0 2024-11-25T17:09:50,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742285_1461 (size=12001) 2024-11-25T17:09:51,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-25T17:09:51,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554651045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554651045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,057 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:51,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-25T17:09:51,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
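Editor's note on the repeated RegionTooBusyException warnings above: the region server rejects writes once the region's memstore passes its blocking threshold, which HBase derives from the configured flush size and block multiplier. The sketch below only illustrates that relationship using the standard configuration keys; the concrete values are assumptions picked so the product matches the "Over memstore limit=512.0 K" reported in this log (the stock defaults are 128 MB and 4), not values read from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Standard HBase keys; the values set here are illustrative assumptions.
    // A test exercising the blocking path would plausibly shrink the flush
    // size so that 128 KB * 4 = 512 KB, the limit seen in the log above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    // Writes are refused with RegionTooBusyException once the region's
    // memstore grows past roughly flushSize * multiplier bytes.
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}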
2024-11-25T17:09:51,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:51,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:51,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:51,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
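Editor's note on pid=130/pid=131 above: pid=130 is the master-side FlushTableProcedure whose completion the client keeps polling ("Checking to see if procedure is done pid=130"), and pid=131 is its FlushRegionProcedure child, dispatched to the region server and failing here only because the region is already flushing on its own. A minimal sketch of how such a flush is typically requested from the client side follows; the table name is taken from the log, but the rest is a generic Admin usage example under that assumption, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on branches with the
      // master-driven flush (as in this log), the master runs a table flush
      // procedure and the client waits for it to report completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}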
2024-11-25T17:09:51,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:51,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554651060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554651060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554651060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-25T17:09:51,209 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:51,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-25T17:09:51,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:51,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:51,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:51,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:51,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:51,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554651249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554651259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554651267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554651267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554651270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,365 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:51,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-25T17:09:51,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:51,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:51,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:51,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
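Editor's note on the rejected Mutate calls above: each one is a client put that arrived while the memstore was over its blocking limit, and the caller is expected to back off and retry once the in-flight flush drains it. The loop below is a minimal, illustrative writer under that assumption; the row, family, and qualifier mirror the log, and whether the RegionTooBusyException surfaces directly or wrapped by the client's own retry machinery depends on the client retry settings.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Under memstore pressure the server answers with RegionTooBusyException
          // (possibly wrapped by the client's retry machinery); pause so the flush
          // can drain the memstore, then retry with a longer backoff.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}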
2024-11-25T17:09:51,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:51,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:51,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/dbeeef6d81424082b34007799d85f2a4 2024-11-25T17:09:51,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/9d0707c6caca47018de83b06f16a7de0 is 50, key is test_row_0/C:col10/1732554590914/Put/seqid=0 2024-11-25T17:09:51,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742286_1462 (size=12001) 2024-11-25T17:09:51,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/9d0707c6caca47018de83b06f16a7de0 2024-11-25T17:09:51,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/03ecdb590d0b4addbf9a3ccb895efa82 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/03ecdb590d0b4addbf9a3ccb895efa82 2024-11-25T17:09:51,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/03ecdb590d0b4addbf9a3ccb895efa82, entries=150, sequenceid=12, filesize=11.7 K 2024-11-25T17:09:51,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/dbeeef6d81424082b34007799d85f2a4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/dbeeef6d81424082b34007799d85f2a4 2024-11-25T17:09:51,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/dbeeef6d81424082b34007799d85f2a4, entries=150, sequenceid=12, filesize=11.7 K 2024-11-25T17:09:51,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/9d0707c6caca47018de83b06f16a7de0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9d0707c6caca47018de83b06f16a7de0 
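Editor's note on the "Committing .tmp/... as ..." lines above: the flush publishes each completed HFile by moving it from the region's .tmp directory into the column family directory, so readers only ever see whole files. The sketch below is a simplified stand-in for that rename step using the paths from this run; it illustrates the pattern rather than the actual HRegionFileSystem implementation.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFile {
  public static void main(String[] args) throws Exception {
    // Endpoint and paths copied from the log above purely for illustration.
    FileSystem fs = FileSystem.get(new URI("hdfs://localhost:41117"), new Configuration());
    Path region = new Path("/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/"
        + "data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab");
    Path tmpFile = new Path(region, ".tmp/A/03ecdb590d0b4addbf9a3ccb895efa82");
    Path committed = new Path(region, "A/03ecdb590d0b4addbf9a3ccb895efa82");
    // The HFile is written in full under .tmp/ first and then published with a
    // single rename, so scanners never observe a partially written store file.
    if (!fs.rename(tmpFile, committed)) {
      throw new IllegalStateException("rename failed: " + tmpFile + " -> " + committed);
    }
  }
}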
2024-11-25T17:09:51,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9d0707c6caca47018de83b06f16a7de0, entries=150, sequenceid=12, filesize=11.7 K 2024-11-25T17:09:51,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 3c0802cb7cf476d143cab96601b733ab in 566ms, sequenceid=12, compaction requested=false 2024-11-25T17:09:51,482 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-25T17:09:51,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:51,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-25T17:09:51,518 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:51,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-25T17:09:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:51,519 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:09:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:51,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:51,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4f9588d46d75400bbe3ccf6305a62917 is 50, key is test_row_0/A:col10/1732554590941/Put/seqid=0 2024-11-25T17:09:51,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742287_1463 (size=12001) 2024-11-25T17:09:51,556 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4f9588d46d75400bbe3ccf6305a62917 2024-11-25T17:09:51,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:51,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:51,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/99539e8e59b5461ca9313f70e8a22462 is 50, key is test_row_0/B:col10/1732554590941/Put/seqid=0 2024-11-25T17:09:51,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742288_1464 (size=12001) 2024-11-25T17:09:51,574 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/99539e8e59b5461ca9313f70e8a22462 2024-11-25T17:09:51,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554651571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554651574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554651574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554651575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554651576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/5b9b20c8ab614e72a53416a89fc8443c is 50, key is test_row_0/C:col10/1732554590941/Put/seqid=0 2024-11-25T17:09:51,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742289_1465 (size=12001) 2024-11-25T17:09:51,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554651678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554651681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554651884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554651889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:51,999 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/5b9b20c8ab614e72a53416a89fc8443c 2024-11-25T17:09:52,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4f9588d46d75400bbe3ccf6305a62917 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4f9588d46d75400bbe3ccf6305a62917 2024-11-25T17:09:52,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-25T17:09:52,011 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4f9588d46d75400bbe3ccf6305a62917, entries=150, sequenceid=38, filesize=11.7 K 2024-11-25T17:09:52,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/99539e8e59b5461ca9313f70e8a22462 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/99539e8e59b5461ca9313f70e8a22462 2024-11-25T17:09:52,016 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/99539e8e59b5461ca9313f70e8a22462, entries=150, sequenceid=38, filesize=11.7 K 2024-11-25T17:09:52,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=131}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-25T17:09:52,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/5b9b20c8ab614e72a53416a89fc8443c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5b9b20c8ab614e72a53416a89fc8443c 2024-11-25T17:09:52,023 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5b9b20c8ab614e72a53416a89fc8443c, entries=150, sequenceid=38, filesize=11.7 K 2024-11-25T17:09:52,024 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3c0802cb7cf476d143cab96601b733ab in 505ms, sequenceid=38, compaction requested=false 2024-11-25T17:09:52,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:52,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
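
The run of RegionTooBusyException warnings above comes from HRegion.checkResources rejecting mutations while this region's memstore sits above its blocking size; the flush that has just finished (pid=131, ~154 KB in 505 ms) is what eventually lets those writers proceed. In stock HBase the blocking size is the per-region flush threshold multiplied by hbase.hregion.memstore.block.multiplier, so the "Over memstore limit=512.0 K" figure is consistent with a deliberately small flush size in this test. Below is a minimal sketch of how such a 512 K limit can be produced; the concrete values are assumptions for illustration, only the property names are standard HBase configuration keys. These rejections are retriable, so the default client backs off and re-issues the Mutate calls rather than failing them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold; 128 KB is an assumed value for illustration.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are rejected with RegionTooBusyException once the memstore
        // reaches flush.size * multiplier; 4 is the stock default.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blocking + " bytes"); // 524288 bytes = 512.0 K
      }
    }
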
2024-11-25T17:09:52,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-25T17:09:52,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-25T17:09:52,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-25T17:09:52,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1200 sec 2024-11-25T17:09:52,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.1260 sec 2024-11-25T17:09:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:52,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:09:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:52,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:52,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/72b29c3611884c92b12ba325edac45d3 is 50, key is test_row_0/A:col10/1732554592094/Put/seqid=0 2024-11-25T17:09:52,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742290_1466 (size=14337) 2024-11-25T17:09:52,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/72b29c3611884c92b12ba325edac45d3 2024-11-25T17:09:52,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/852d10e2e1be4506860a0157792c47d9 is 50, key is test_row_0/B:col10/1732554592094/Put/seqid=0 2024-11-25T17:09:52,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to 
blk_1073742291_1467 (size=9657) 2024-11-25T17:09:52,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/852d10e2e1be4506860a0157792c47d9 2024-11-25T17:09:52,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554652166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554652167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554652168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/323bd4352f634bcf8e09e5c52a97d2df is 50, key is test_row_0/C:col10/1732554592094/Put/seqid=0 2024-11-25T17:09:52,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742292_1468 (size=9657) 2024-11-25T17:09:52,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554652189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554652199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554652282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554652282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554652283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554652488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554652497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554652512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/323bd4352f634bcf8e09e5c52a97d2df 2024-11-25T17:09:52,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/72b29c3611884c92b12ba325edac45d3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/72b29c3611884c92b12ba325edac45d3 2024-11-25T17:09:52,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/72b29c3611884c92b12ba325edac45d3, entries=200, sequenceid=50, filesize=14.0 K 2024-11-25T17:09:52,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/852d10e2e1be4506860a0157792c47d9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/852d10e2e1be4506860a0157792c47d9 2024-11-25T17:09:52,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/852d10e2e1be4506860a0157792c47d9, entries=100, sequenceid=50, filesize=9.4 K 2024-11-25T17:09:52,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/323bd4352f634bcf8e09e5c52a97d2df as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/323bd4352f634bcf8e09e5c52a97d2df 2024-11-25T17:09:52,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/323bd4352f634bcf8e09e5c52a97d2df, entries=100, sequenceid=50, filesize=9.4 K 2024-11-25T17:09:52,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3c0802cb7cf476d143cab96601b733ab in 518ms, sequenceid=50, compaction requested=true 2024-11-25T17:09:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:52,614 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:52,614 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:52,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:52,615 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:52,615 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38339 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:52,615 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:09:52,615 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:09:52,615 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
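
At this point the second flush (sequenceid=50) has left each store with three HFiles, so the flusher queues compaction requests and the ExploringCompactionPolicy selects all three files per store for a minor compaction. For reference, the same flush-then-compact sequence can be requested explicitly through the Admin API; this is a minimal sketch assuming a standard HBase 2.x client, not something the test itself does here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompact {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.flush(table);   // counterpart of the FlushTableProcedure seen earlier in the log
          admin.compact(table); // asks the region servers to run minor compactions
        }
      }
    }
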
2024-11-25T17:09:52,615 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:52,615 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/dbeeef6d81424082b34007799d85f2a4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/99539e8e59b5461ca9313f70e8a22462, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/852d10e2e1be4506860a0157792c47d9] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=32.9 K 2024-11-25T17:09:52,615 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/03ecdb590d0b4addbf9a3ccb895efa82, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4f9588d46d75400bbe3ccf6305a62917, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/72b29c3611884c92b12ba325edac45d3] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=37.4 K 2024-11-25T17:09:52,615 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03ecdb590d0b4addbf9a3ccb895efa82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732554590910 2024-11-25T17:09:52,615 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting dbeeef6d81424082b34007799d85f2a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732554590910 2024-11-25T17:09:52,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:52,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:52,616 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 99539e8e59b5461ca9313f70e8a22462, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732554590934 2024-11-25T17:09:52,616 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f9588d46d75400bbe3ccf6305a62917, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732554590934 2024-11-25T17:09:52,616 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 852d10e2e1be4506860a0157792c47d9, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732554592093 2024-11-25T17:09:52,616 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72b29c3611884c92b12ba325edac45d3, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732554591569 2024-11-25T17:09:52,624 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T17:09:52,626 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#391 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:52,626 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/29f12ea0c9c64b0f805198077a6e50db is 50, key is test_row_0/B:col10/1732554592094/Put/seqid=0 2024-11-25T17:09:52,631 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#392 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:52,632 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/b952fcd6748b46fdbce498641f4e55c3 is 50, key is test_row_0/A:col10/1732554592094/Put/seqid=0 2024-11-25T17:09:52,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742294_1470 (size=12104) 2024-11-25T17:09:52,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742293_1469 (size=12104) 2024-11-25T17:09:52,678 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/29f12ea0c9c64b0f805198077a6e50db as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/29f12ea0c9c64b0f805198077a6e50db 2024-11-25T17:09:52,680 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/b952fcd6748b46fdbce498641f4e55c3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b952fcd6748b46fdbce498641f4e55c3 2024-11-25T17:09:52,687 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 
3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into b952fcd6748b46fdbce498641f4e55c3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:52,687 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:52,687 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554592614; duration=0sec 2024-11-25T17:09:52,687 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:52,687 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:09:52,687 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:52,688 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into 29f12ea0c9c64b0f805198077a6e50db(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:52,688 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:52,688 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554592614; duration=0sec 2024-11-25T17:09:52,688 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:52,688 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:09:52,690 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:52,690 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:09:52,690 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
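
The A and B stores have now each been compacted from three files back to a single ~11.8 K file, and the same selection is about to run for C. The "3 eligible, 16 blocking" wording in the selection lines reflects the usual store-file thresholds: a minor compaction becomes eligible at three files, and flushes are delayed once a store reaches sixteen. A minimal sketch of those knobs follows, assuming the stock defaults (the test is not shown overriding them here).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholds {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Store file count at which further flushes are delayed, matching the
        // "16 blocking" figure in the selection log lines.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", 3)
            + ", blockingStoreFiles = " + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
      }
    }
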
2024-11-25T17:09:52,690 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9d0707c6caca47018de83b06f16a7de0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5b9b20c8ab614e72a53416a89fc8443c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/323bd4352f634bcf8e09e5c52a97d2df] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=32.9 K 2024-11-25T17:09:52,690 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d0707c6caca47018de83b06f16a7de0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732554590910 2024-11-25T17:09:52,691 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b9b20c8ab614e72a53416a89fc8443c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732554590934 2024-11-25T17:09:52,691 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 323bd4352f634bcf8e09e5c52a97d2df, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732554592093 2024-11-25T17:09:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:52,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:09:52,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:52,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:52,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:52,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:52,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:52,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:52,705 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#393 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:52,706 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/895a159d483743bd91583650a132fb01 is 50, key is test_row_0/C:col10/1732554592094/Put/seqid=0 2024-11-25T17:09:52,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/eb85fd72748c4cd098adb8769cff0ef0 is 50, key is test_row_0/A:col10/1732554592700/Put/seqid=0 2024-11-25T17:09:52,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742295_1471 (size=12104) 2024-11-25T17:09:52,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742296_1472 (size=14341) 2024-11-25T17:09:52,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554652735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554652735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554652791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554652810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554652817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554652841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:52,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:52,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554652842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-25T17:09:53,014 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-25T17:09:53,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:53,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-25T17:09:53,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-25T17:09:53,024 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:53,025 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:53,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:53,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554653050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554653055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/eb85fd72748c4cd098adb8769cff0ef0 2024-11-25T17:09:53,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-25T17:09:53,127 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/895a159d483743bd91583650a132fb01 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/895a159d483743bd91583650a132fb01 2024-11-25T17:09:53,134 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 895a159d483743bd91583650a132fb01(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
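The repeated RegionTooBusyException WARN records above are the region pushing back on writers while its memstore is over the 512 K blocking limit and a flush is still in flight. The sketch below shows one way a caller could back off and retry when that pushback surfaces; it is illustrative only. The table, row, family, and qualifier names mirror the log, the backoff values are assumptions, and it assumes client retries are tuned low enough that the exception reaches the caller (the stock client normally retries it internally).

// Minimal sketch, assuming standard HBase 2.x client APIs.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 50;                                 // starting backoff (assumption)
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);                                  // rejected while the memstore is over its blocking limit
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);                         // give the in-flight flush time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IllegalStateException("region stayed too busy after 10 attempts");
    }
  }
}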
2024-11-25T17:09:53,134 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:53,134 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554592614; duration=0sec 2024-11-25T17:09:53,134 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:53,134 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:09:53,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/edc9eb206ee6419fa0e609a11841a16b is 50, key is test_row_0/B:col10/1732554592700/Put/seqid=0 2024-11-25T17:09:53,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:53,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-25T17:09:53,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:53,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
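The "flush TestAcidGuarantees" request logged above (pid=132, with subprocedure pid=133 repeatedly rejected because the region is already flushing) originates from an Admin-level flush call on the client side. The sketch below shows that call in isolation; it is illustrative only, with the table name taken from the log and everything else assumed.

// Minimal sketch, assuming standard HBase 2.x client APIs; Admin.flush blocks until the
// master-side flush procedure completes, retrying the per-region callable as seen in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));  // waits for the FlushTableProcedure to finish
    }
  }
}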
2024-11-25T17:09:53,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742297_1473 (size=12001) 2024-11-25T17:09:53,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/edc9eb206ee6419fa0e609a11841a16b 2024-11-25T17:09:53,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/67a7f0d95f9d40e49abb2796b6de47e2 is 50, key is test_row_0/C:col10/1732554592700/Put/seqid=0 2024-11-25T17:09:53,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742298_1474 (size=12001) 2024-11-25T17:09:53,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554653298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554653322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-25T17:09:53,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554653329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,333 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:53,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-25T17:09:53,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:53,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:53,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554653356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554653365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:53,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-25T17:09:53,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:53,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:53,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-25T17:09:53,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:53,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-25T17:09:53,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:53,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:53,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:09:53,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/67a7f0d95f9d40e49abb2796b6de47e2 2024-11-25T17:09:53,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/eb85fd72748c4cd098adb8769cff0ef0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eb85fd72748c4cd098adb8769cff0ef0 2024-11-25T17:09:53,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eb85fd72748c4cd098adb8769cff0ef0, entries=200, sequenceid=77, filesize=14.0 K 2024-11-25T17:09:53,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/edc9eb206ee6419fa0e609a11841a16b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/edc9eb206ee6419fa0e609a11841a16b 2024-11-25T17:09:53,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/edc9eb206ee6419fa0e609a11841a16b, entries=150, sequenceid=77, filesize=11.7 K 2024-11-25T17:09:53,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/67a7f0d95f9d40e49abb2796b6de47e2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67a7f0d95f9d40e49abb2796b6de47e2 2024-11-25T17:09:53,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67a7f0d95f9d40e49abb2796b6de47e2, entries=150, sequenceid=77, filesize=11.7 K 2024-11-25T17:09:53,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3c0802cb7cf476d143cab96601b733ab in 980ms, sequenceid=77, compaction requested=false 2024-11-25T17:09:53,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:53,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:53,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=133 2024-11-25T17:09:53,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:53,802 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:09:53,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:53,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:53,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:53,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:53,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:53,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:53,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e7c572f504834ef6a18b2fb6044b1825 is 50, key is test_row_0/A:col10/1732554592733/Put/seqid=0 2024-11-25T17:09:53,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742299_1475 (size=12001) 2024-11-25T17:09:53,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:53,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:53,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554653960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:53,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:53,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554653962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554654071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554654071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-25T17:09:54,240 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e7c572f504834ef6a18b2fb6044b1825 2024-11-25T17:09:54,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/07b8aaaa506348eca299b0a4811c070d is 50, key is test_row_0/B:col10/1732554592733/Put/seqid=0 2024-11-25T17:09:54,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742300_1476 (size=12001) 2024-11-25T17:09:54,274 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/07b8aaaa506348eca299b0a4811c070d 2024-11-25T17:09:54,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554654280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554654285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/5f579f7adcc14a01973f3373a7f82b5d is 50, key is test_row_0/C:col10/1732554592733/Put/seqid=0 2024-11-25T17:09:54,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742301_1477 (size=12001) 2024-11-25T17:09:54,309 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/5f579f7adcc14a01973f3373a7f82b5d 2024-11-25T17:09:54,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e7c572f504834ef6a18b2fb6044b1825 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e7c572f504834ef6a18b2fb6044b1825 2024-11-25T17:09:54,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554654312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,318 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e7c572f504834ef6a18b2fb6044b1825, entries=150, sequenceid=90, filesize=11.7 K 2024-11-25T17:09:54,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/07b8aaaa506348eca299b0a4811c070d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/07b8aaaa506348eca299b0a4811c070d 2024-11-25T17:09:54,324 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/07b8aaaa506348eca299b0a4811c070d, entries=150, sequenceid=90, filesize=11.7 K 2024-11-25T17:09:54,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/5f579f7adcc14a01973f3373a7f82b5d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5f579f7adcc14a01973f3373a7f82b5d 2024-11-25T17:09:54,330 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5f579f7adcc14a01973f3373a7f82b5d, entries=150, sequenceid=90, filesize=11.7 K 2024-11-25T17:09:54,331 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3c0802cb7cf476d143cab96601b733ab in 529ms, sequenceid=90, 
compaction requested=true 2024-11-25T17:09:54,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:54,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:54,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-25T17:09:54,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-25T17:09:54,334 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-25T17:09:54,334 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3070 sec 2024-11-25T17:09:54,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:54,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:09:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:54,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.3120 sec 2024-11-25T17:09:54,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/eeb9db151ef54c54bf52631aa9b34712 is 50, key is test_row_0/A:col10/1732554594334/Put/seqid=0 2024-11-25T17:09:54,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742302_1478 (size=14341) 2024-11-25T17:09:54,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/eeb9db151ef54c54bf52631aa9b34712 2024-11-25T17:09:54,356 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7319b11a66564e57b8bc645e1ac3054d is 50, key is test_row_0/B:col10/1732554594334/Put/seqid=0 2024-11-25T17:09:54,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554654368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742303_1479 (size=12001) 2024-11-25T17:09:54,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7319b11a66564e57b8bc645e1ac3054d 2024-11-25T17:09:54,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/12f4f9d785ce47cb9d28c2e98f22494f is 50, key is test_row_0/C:col10/1732554594334/Put/seqid=0 2024-11-25T17:09:54,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554654385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742304_1480 (size=12001) 2024-11-25T17:09:54,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/12f4f9d785ce47cb9d28c2e98f22494f 2024-11-25T17:09:54,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/eeb9db151ef54c54bf52631aa9b34712 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eeb9db151ef54c54bf52631aa9b34712 2024-11-25T17:09:54,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eeb9db151ef54c54bf52631aa9b34712, entries=200, sequenceid=115, filesize=14.0 K 2024-11-25T17:09:54,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7319b11a66564e57b8bc645e1ac3054d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7319b11a66564e57b8bc645e1ac3054d 2024-11-25T17:09:54,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7319b11a66564e57b8bc645e1ac3054d, entries=150, sequenceid=115, filesize=11.7 K 2024-11-25T17:09:54,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/12f4f9d785ce47cb9d28c2e98f22494f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/12f4f9d785ce47cb9d28c2e98f22494f 2024-11-25T17:09:54,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/12f4f9d785ce47cb9d28c2e98f22494f, entries=150, sequenceid=115, filesize=11.7 K 2024-11-25T17:09:54,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 3c0802cb7cf476d143cab96601b733ab in 92ms, sequenceid=115, compaction requested=true 2024-11-25T17:09:54,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:54,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:54,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:54,427 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:54,427 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:54,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:54,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:54,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:54,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:54,428 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:54,428 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction 
(all files) 2024-11-25T17:09:54,429 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:54,429 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/29f12ea0c9c64b0f805198077a6e50db, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/edc9eb206ee6419fa0e609a11841a16b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/07b8aaaa506348eca299b0a4811c070d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7319b11a66564e57b8bc645e1ac3054d] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=47.0 K 2024-11-25T17:09:54,429 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:54,429 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:09:54,429 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:09:54,429 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b952fcd6748b46fdbce498641f4e55c3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eb85fd72748c4cd098adb8769cff0ef0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e7c572f504834ef6a18b2fb6044b1825, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eeb9db151ef54c54bf52631aa9b34712] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=51.5 K 2024-11-25T17:09:54,429 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 29f12ea0c9c64b0f805198077a6e50db, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732554590943 2024-11-25T17:09:54,430 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b952fcd6748b46fdbce498641f4e55c3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732554590943 2024-11-25T17:09:54,430 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting edc9eb206ee6419fa0e609a11841a16b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732554592153 2024-11-25T17:09:54,430 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb85fd72748c4cd098adb8769cff0ef0, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732554592151 2024-11-25T17:09:54,430 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 07b8aaaa506348eca299b0a4811c070d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732554592726 2024-11-25T17:09:54,431 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7319b11a66564e57b8bc645e1ac3054d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554593938 2024-11-25T17:09:54,431 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7c572f504834ef6a18b2fb6044b1825, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732554592726 2024-11-25T17:09:54,431 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting eeb9db151ef54c54bf52631aa9b34712, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554593930 2024-11-25T17:09:54,444 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#403 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:54,445 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6ee16fe2e83e48e4907f821ce3155305 is 50, key is test_row_0/B:col10/1732554594334/Put/seqid=0 2024-11-25T17:09:54,472 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#404 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:54,472 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e52358b2e5b840b69af2e25b8af50401 is 50, key is test_row_0/A:col10/1732554594334/Put/seqid=0 2024-11-25T17:09:54,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:09:54,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:54,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:54,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:54,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:54,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3a6b55984a86448c9646d6a81bd40354 is 50, key is test_row_0/A:col10/1732554594479/Put/seqid=0 2024-11-25T17:09:54,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742306_1482 (size=12241) 2024-11-25T17:09:54,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742305_1481 (size=12241) 2024-11-25T17:09:54,509 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e52358b2e5b840b69af2e25b8af50401 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e52358b2e5b840b69af2e25b8af50401 2024-11-25T17:09:54,511 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6ee16fe2e83e48e4907f821ce3155305 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6ee16fe2e83e48e4907f821ce3155305 2024-11-25T17:09:54,518 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into e52358b2e5b840b69af2e25b8af50401(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:54,518 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:54,518 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=12, startTime=1732554594427; duration=0sec 2024-11-25T17:09:54,518 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:54,518 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:09:54,518 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:09:54,521 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into 6ee16fe2e83e48e4907f821ce3155305(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:54,521 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:54,521 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=12, startTime=1732554594427; duration=0sec 2024-11-25T17:09:54,521 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:54,521 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:09:54,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742307_1483 (size=14341) 2024-11-25T17:09:54,523 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:09:54,523 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:09:54,523 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:54,523 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/895a159d483743bd91583650a132fb01, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67a7f0d95f9d40e49abb2796b6de47e2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5f579f7adcc14a01973f3373a7f82b5d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/12f4f9d785ce47cb9d28c2e98f22494f] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=47.0 K 2024-11-25T17:09:54,524 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 895a159d483743bd91583650a132fb01, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732554590943 2024-11-25T17:09:54,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3a6b55984a86448c9646d6a81bd40354 2024-11-25T17:09:54,524 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
67a7f0d95f9d40e49abb2796b6de47e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732554592153 2024-11-25T17:09:54,525 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f579f7adcc14a01973f3373a7f82b5d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732554592726 2024-11-25T17:09:54,525 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12f4f9d785ce47cb9d28c2e98f22494f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554593938 2024-11-25T17:09:54,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7b0e58b5d90047a192c4e764a8c7ee7a is 50, key is test_row_0/B:col10/1732554594479/Put/seqid=0 2024-11-25T17:09:54,548 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#407 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:54,548 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ff63502e3a34444eba5e4861b058062f is 50, key is test_row_0/C:col10/1732554594334/Put/seqid=0 2024-11-25T17:09:54,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742308_1484 (size=12001) 2024-11-25T17:09:54,556 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7b0e58b5d90047a192c4e764a8c7ee7a 2024-11-25T17:09:54,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554654574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554654575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742309_1485 (size=12241) 2024-11-25T17:09:54,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/b990763645a242e1a279b61dbf5b4de7 is 50, key is test_row_0/C:col10/1732554594479/Put/seqid=0 2024-11-25T17:09:54,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554654588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554654590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,605 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ff63502e3a34444eba5e4861b058062f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ff63502e3a34444eba5e4861b058062f 2024-11-25T17:09:54,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742310_1486 (size=12001) 2024-11-25T17:09:54,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/b990763645a242e1a279b61dbf5b4de7 2024-11-25T17:09:54,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3a6b55984a86448c9646d6a81bd40354 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3a6b55984a86448c9646d6a81bd40354 2024-11-25T17:09:54,624 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into ff63502e3a34444eba5e4861b058062f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
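The repeated RegionTooBusyException warnings above are HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit (512.0 K here). Below is a minimal, hypothetical client-side sketch of how a writer might back off and retry such rejections; the table, row and column names are taken from this log, while the retry count, sleeps and the reduced hbase.client.retries.number setting are illustrative assumptions and not part of the test shown here.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative: keep the client's own retrying short so server-side
    // rejections surface quickly and this loop controls the backoff.
    conf.setInt("hbase.client.retries.number", 1);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // write accepted
          break;
        } catch (IOException e) {
          // e.g. RegionTooBusyException ("Over memstore limit=512.0 K" above),
          // possibly wrapped by the client's retry machinery: back off and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```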
2024-11-25T17:09:54,624 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:54,624 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=12, startTime=1732554594427; duration=0sec 2024-11-25T17:09:54,624 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:54,624 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:09:54,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3a6b55984a86448c9646d6a81bd40354, entries=200, sequenceid=127, filesize=14.0 K 2024-11-25T17:09:54,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7b0e58b5d90047a192c4e764a8c7ee7a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7b0e58b5d90047a192c4e764a8c7ee7a 2024-11-25T17:09:54,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7b0e58b5d90047a192c4e764a8c7ee7a, entries=150, sequenceid=127, filesize=11.7 K 2024-11-25T17:09:54,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/b990763645a242e1a279b61dbf5b4de7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/b990763645a242e1a279b61dbf5b4de7 2024-11-25T17:09:54,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/b990763645a242e1a279b61dbf5b4de7, entries=150, sequenceid=127, filesize=11.7 K 2024-11-25T17:09:54,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3c0802cb7cf476d143cab96601b733ab in 169ms, sequenceid=127, compaction requested=false 2024-11-25T17:09:54,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:54,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:09:54,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:54,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:54,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:54,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:54,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:54,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/01cc76fabcc54a2993ff4baced789a4c is 50, key is test_row_0/A:col10/1732554594689/Put/seqid=0 2024-11-25T17:09:54,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742311_1487 (size=12151) 2024-11-25T17:09:54,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/01cc76fabcc54a2993ff4baced789a4c 2024-11-25T17:09:54,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554654726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554654726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/059a4c2ce186425cb2840430adaec899 is 50, key is test_row_0/B:col10/1732554594689/Put/seqid=0 2024-11-25T17:09:54,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742312_1488 (size=12151) 2024-11-25T17:09:54,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554654838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:54,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554654845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:54,990 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T17:09:55,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554655050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554655055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554655096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554655098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-25T17:09:55,129 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-25T17:09:55,134 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:55,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-25T17:09:55,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-25T17:09:55,135 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:55,135 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:55,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:55,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/059a4c2ce186425cb2840430adaec899 2024-11-25T17:09:55,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/3da56ba77d1642d29cefe8ffbe7d6fc9 is 50, key is test_row_0/C:col10/1732554594689/Put/seqid=0 2024-11-25T17:09:55,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742313_1489 (size=12151) 
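The master-side records just above ("Client=jenkins//172.17.0.3 flush TestAcidGuarantees", "Stored pid=134 ... FlushTableProcedure", "Checking to see if procedure is done pid=134") correspond to a client requesting a table flush and then polling the resulting procedure. A minimal sketch of how such a flush is requested through the public Admin API follows; it is not the test's own code, only an assumed equivalent.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // flush procedure and the call returns once it completes, which is what
      // the "procId: ... completed" / "Checking to see if procedure is done"
      // lines in this log reflect.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```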
2024-11-25T17:09:55,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/3da56ba77d1642d29cefe8ffbe7d6fc9 2024-11-25T17:09:55,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/01cc76fabcc54a2993ff4baced789a4c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/01cc76fabcc54a2993ff4baced789a4c 2024-11-25T17:09:55,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/01cc76fabcc54a2993ff4baced789a4c, entries=150, sequenceid=156, filesize=11.9 K 2024-11-25T17:09:55,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/059a4c2ce186425cb2840430adaec899 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/059a4c2ce186425cb2840430adaec899 2024-11-25T17:09:55,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-25T17:09:55,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/059a4c2ce186425cb2840430adaec899, entries=150, sequenceid=156, filesize=11.9 K 2024-11-25T17:09:55,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/3da56ba77d1642d29cefe8ffbe7d6fc9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/3da56ba77d1642d29cefe8ffbe7d6fc9 2024-11-25T17:09:55,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/3da56ba77d1642d29cefe8ffbe7d6fc9, entries=150, sequenceid=156, filesize=11.9 K 2024-11-25T17:09:55,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 3c0802cb7cf476d143cab96601b733ab in 552ms, sequenceid=156, compaction requested=true 2024-11-25T17:09:55,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:55,245 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-25T17:09:55,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:55,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:55,245 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:55,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:55,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:55,246 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38733 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:55,246 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:09:55,246 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:55,246 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e52358b2e5b840b69af2e25b8af50401, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3a6b55984a86448c9646d6a81bd40354, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/01cc76fabcc54a2993ff4baced789a4c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=37.8 K 2024-11-25T17:09:55,246 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:55,246 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:09:55,246 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
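The "Exploring compaction algorithm has selected 3 files of size 38733 ... with 1 in ratio" lines above come from the exploring compaction policy's size-ratio test. Below is a simplified stand-alone illustration of that test (not the actual HBase implementation): a candidate file is "in ratio" when it is no larger than the configured compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other candidates. The example sizes add up to the 38,733 bytes reported for store A above; the per-file split is inferred from file sizes elsewhere in this log.

```java
import java.util.List;

public final class CompactionRatioSketch {

  /** Simplified version of the "in ratio" test: every file must be no bigger
   *  than ratio times the sum of the other files in the candidate set. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three HFiles totalling 38,733 bytes, as in the store-A selection above.
    List<Long> sizes = List.of(12_241L, 14_341L, 12_151L);
    System.out.println(filesInRatio(sizes, 1.2));   // true: all three stay selected
  }
}
```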
2024-11-25T17:09:55,246 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6ee16fe2e83e48e4907f821ce3155305, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7b0e58b5d90047a192c4e764a8c7ee7a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/059a4c2ce186425cb2840430adaec899] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.5 K 2024-11-25T17:09:55,246 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e52358b2e5b840b69af2e25b8af50401, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554593938 2024-11-25T17:09:55,247 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ee16fe2e83e48e4907f821ce3155305, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554593938 2024-11-25T17:09:55,247 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a6b55984a86448c9646d6a81bd40354, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732554594360 2024-11-25T17:09:55,247 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b0e58b5d90047a192c4e764a8c7ee7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732554594360 2024-11-25T17:09:55,247 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01cc76fabcc54a2993ff4baced789a4c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732554594568 2024-11-25T17:09:55,247 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 059a4c2ce186425cb2840430adaec899, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732554594568 2024-11-25T17:09:55,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:55,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:55,257 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:55,258 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#413 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:55,259 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/f310ddc7b9a840cebad55f67eccde37e is 50, key is test_row_0/A:col10/1732554594689/Put/seqid=0 2024-11-25T17:09:55,259 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/a4b9de6250124181b4fcd884aa4808bf is 50, key is test_row_0/B:col10/1732554594689/Put/seqid=0 2024-11-25T17:09:55,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742314_1490 (size=12493) 2024-11-25T17:09:55,289 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:55,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-25T17:09:55,296 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/f310ddc7b9a840cebad55f67eccde37e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f310ddc7b9a840cebad55f67eccde37e 2024-11-25T17:09:55,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:09:55,297 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:09:55,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:55,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:55,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:55,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:55,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:55,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:55,303 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into f310ddc7b9a840cebad55f67eccde37e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
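The MemStoreFlusher activity above and the "Over memstore limit=512.0 K" rejections throughout this log are governed by two region-server settings: the per-region memstore flush size and the blocking multiplier (blocking limit = flush size x multiplier). The sketch below only shows how those knobs relate; the 128 KB flush size is a hypothetical value chosen so that the product matches the 512 K figure in the log, and the test's real configuration is not shown here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore to HFiles once it reaches this many bytes
    // (hypothetical 128 KB here; the stock default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Reject writes with RegionTooBusyException once the memstore reaches
    // flush.size * multiplier; 128 KB * 4 = 512 KB, matching the warnings above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288
  }
}
```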
2024-11-25T17:09:55,303 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:55,303 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554595244; duration=0sec 2024-11-25T17:09:55,303 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:55,303 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:09:55,303 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:55,304 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:55,304 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:09:55,304 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:55,304 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ff63502e3a34444eba5e4861b058062f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/b990763645a242e1a279b61dbf5b4de7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/3da56ba77d1642d29cefe8ffbe7d6fc9] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.5 K 2024-11-25T17:09:55,305 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff63502e3a34444eba5e4861b058062f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732554593938 2024-11-25T17:09:55,305 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b990763645a242e1a279b61dbf5b4de7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732554594360 2024-11-25T17:09:55,305 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3da56ba77d1642d29cefe8ffbe7d6fc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732554594568 2024-11-25T17:09:55,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/bd4a280e001f4a89bedd202cc59e4784 is 50, key is test_row_0/A:col10/1732554594712/Put/seqid=0 2024-11-25T17:09:55,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742315_1491 (size=12493) 2024-11-25T17:09:55,321 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/a4b9de6250124181b4fcd884aa4808bf as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/a4b9de6250124181b4fcd884aa4808bf 2024-11-25T17:09:55,325 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into a4b9de6250124181b4fcd884aa4808bf(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:55,325 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:55,326 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554595245; duration=0sec 2024-11-25T17:09:55,326 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:55,326 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:09:55,346 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#415 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:55,347 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ce6446f78d8942d6bf06a7990133ecdc is 50, key is test_row_0/C:col10/1732554594689/Put/seqid=0 2024-11-25T17:09:55,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742316_1492 (size=9757) 2024-11-25T17:09:55,349 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/bd4a280e001f4a89bedd202cc59e4784 2024-11-25T17:09:55,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/d91b698f1e6e412c8bf3ff7ab173ddc1 is 50, key is test_row_0/B:col10/1732554594712/Put/seqid=0 2024-11-25T17:09:55,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:55,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:55,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742317_1493 (size=12493) 2024-11-25T17:09:55,383 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ce6446f78d8942d6bf06a7990133ecdc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ce6446f78d8942d6bf06a7990133ecdc 2024-11-25T17:09:55,388 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into ce6446f78d8942d6bf06a7990133ecdc(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:55,388 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:55,388 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554595245; duration=0sec 2024-11-25T17:09:55,389 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:55,389 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:09:55,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742318_1494 (size=9757) 2024-11-25T17:09:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-25T17:09:55,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554655468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554655470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554655575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554655581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-25T17:09:55,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554655779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554655786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:55,794 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/d91b698f1e6e412c8bf3ff7ab173ddc1 2024-11-25T17:09:55,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/99558f67e4cd42d08284089b12165fe7 is 50, key is test_row_0/C:col10/1732554594712/Put/seqid=0 2024-11-25T17:09:55,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742319_1495 (size=9757) 2024-11-25T17:09:55,840 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/99558f67e4cd42d08284089b12165fe7 2024-11-25T17:09:55,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/bd4a280e001f4a89bedd202cc59e4784 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bd4a280e001f4a89bedd202cc59e4784 2024-11-25T17:09:55,851 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bd4a280e001f4a89bedd202cc59e4784, entries=100, sequenceid=167, filesize=9.5 K 2024-11-25T17:09:55,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/d91b698f1e6e412c8bf3ff7ab173ddc1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/d91b698f1e6e412c8bf3ff7ab173ddc1 2024-11-25T17:09:55,856 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/d91b698f1e6e412c8bf3ff7ab173ddc1, entries=100, sequenceid=167, filesize=9.5 K 2024-11-25T17:09:55,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/99558f67e4cd42d08284089b12165fe7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/99558f67e4cd42d08284089b12165fe7 2024-11-25T17:09:55,861 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/99558f67e4cd42d08284089b12165fe7, entries=100, sequenceid=167, filesize=9.5 K 2024-11-25T17:09:55,862 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3c0802cb7cf476d143cab96601b733ab in 565ms, sequenceid=167, compaction requested=false 2024-11-25T17:09:55,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:55,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
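The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server refusing new writes while this flush drains the memstore: once a region's memstore grows past its flush size multiplied by the blocking multiplier, puts are rejected until the flush shown here finishes. The 512 K limit reflects deliberately small test settings; stock defaults are far larger (roughly a 128 MB flush size with a 4x blocking multiplier). A minimal sketch of configuring such a small blocking threshold, assuming these illustrative values (the property names are the standard HBase keys; the class name and the specific numbers are not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical helper: produce a Configuration with a very small memstore
    // blocking threshold, similar in spirit to what this test run appears to use.
    public final class SmallMemstoreConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB (illustrative; the default is ~128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Block new writes once the memstore reaches 4x the flush size,
            // i.e. 512 KB here, matching the "Over memstore limit=512.0 K" lines above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }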
2024-11-25T17:09:55,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-25T17:09:55,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-25T17:09:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-25T17:09:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 728 msec 2024-11-25T17:09:55,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 733 msec 2024-11-25T17:09:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:56,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:09:56,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:56,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:56,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:56,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:56,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:56,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:56,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e0e303a330bd4d70a69ac19dbb5ad34c is 50, key is test_row_0/A:col10/1732554596089/Put/seqid=0 2024-11-25T17:09:56,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554656109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554656113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554656115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554656117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742320_1496 (size=14541) 2024-11-25T17:09:56,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e0e303a330bd4d70a69ac19dbb5ad34c 2024-11-25T17:09:56,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/51da9c682637435594f8d96e1a4eafac is 50, key is test_row_0/B:col10/1732554596089/Put/seqid=0 2024-11-25T17:09:56,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742321_1497 (size=12151) 2024-11-25T17:09:56,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554656222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554656223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554656226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554656227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-25T17:09:56,243 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-25T17:09:56,246 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:56,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-25T17:09:56,248 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:56,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-25T17:09:56,249 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:56,249 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:56,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554656326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,329 DEBUG [Thread-2041 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., hostname=6579369734b6,41865,1732554474464, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:09:56,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-25T17:09:56,401 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:56,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-25T17:09:56,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:56,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:56,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:56,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:56,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
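The FlushTableProcedure/FlushRegionProcedure entries around pid=136/137 are the server side of an administrative flush request; pid=137 fails with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still working on the same region, and that failure is reported back to the master. On the client side the request is a single Admin call; a minimal sketch, with the table name taken from the log and error handling kept trivial (the class name is invented for this example):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Request a flush of every region of the table; the master drives this
                // through FlushTableProcedure / FlushRegionProcedure as seen in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }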
2024-11-25T17:09:56,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:56,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554656429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554656430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554656430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554656433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,535 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/03ecdb590d0b4addbf9a3ccb895efa82, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4f9588d46d75400bbe3ccf6305a62917, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/72b29c3611884c92b12ba325edac45d3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b952fcd6748b46fdbce498641f4e55c3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eb85fd72748c4cd098adb8769cff0ef0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e7c572f504834ef6a18b2fb6044b1825, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eeb9db151ef54c54bf52631aa9b34712, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e52358b2e5b840b69af2e25b8af50401, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3a6b55984a86448c9646d6a81bd40354, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/01cc76fabcc54a2993ff4baced789a4c] to archive 2024-11-25T17:09:56,536 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T17:09:56,538 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/03ecdb590d0b4addbf9a3ccb895efa82 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/03ecdb590d0b4addbf9a3ccb895efa82 2024-11-25T17:09:56,539 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4f9588d46d75400bbe3ccf6305a62917 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4f9588d46d75400bbe3ccf6305a62917 2024-11-25T17:09:56,540 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/72b29c3611884c92b12ba325edac45d3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/72b29c3611884c92b12ba325edac45d3 2024-11-25T17:09:56,542 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b952fcd6748b46fdbce498641f4e55c3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b952fcd6748b46fdbce498641f4e55c3 2024-11-25T17:09:56,543 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eb85fd72748c4cd098adb8769cff0ef0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eb85fd72748c4cd098adb8769cff0ef0 2024-11-25T17:09:56,544 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e7c572f504834ef6a18b2fb6044b1825 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e7c572f504834ef6a18b2fb6044b1825 2024-11-25T17:09:56,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-25T17:09:56,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/51da9c682637435594f8d96e1a4eafac 2024-11-25T17:09:56,561 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:56,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-25T17:09:56,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:56,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:56,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:56,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:56,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:56,568 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eeb9db151ef54c54bf52631aa9b34712 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/eeb9db151ef54c54bf52631aa9b34712 2024-11-25T17:09:56,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:56,573 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e52358b2e5b840b69af2e25b8af50401 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e52358b2e5b840b69af2e25b8af50401 2024-11-25T17:09:56,575 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3a6b55984a86448c9646d6a81bd40354 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3a6b55984a86448c9646d6a81bd40354 2024-11-25T17:09:56,576 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/01cc76fabcc54a2993ff4baced789a4c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/01cc76fabcc54a2993ff4baced789a4c 2024-11-25T17:09:56,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/63fb7b2d2f584dd2bc98b3cd8838cfbc is 50, key is test_row_0/C:col10/1732554596089/Put/seqid=0 2024-11-25T17:09:56,586 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/dbeeef6d81424082b34007799d85f2a4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/99539e8e59b5461ca9313f70e8a22462, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/29f12ea0c9c64b0f805198077a6e50db, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/852d10e2e1be4506860a0157792c47d9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/edc9eb206ee6419fa0e609a11841a16b, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/07b8aaaa506348eca299b0a4811c070d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6ee16fe2e83e48e4907f821ce3155305, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7319b11a66564e57b8bc645e1ac3054d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7b0e58b5d90047a192c4e764a8c7ee7a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/059a4c2ce186425cb2840430adaec899] to archive 2024-11-25T17:09:56,590 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:09:56,591 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/dbeeef6d81424082b34007799d85f2a4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/dbeeef6d81424082b34007799d85f2a4 2024-11-25T17:09:56,594 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/99539e8e59b5461ca9313f70e8a22462 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/99539e8e59b5461ca9313f70e8a22462 2024-11-25T17:09:56,596 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/29f12ea0c9c64b0f805198077a6e50db to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/29f12ea0c9c64b0f805198077a6e50db 2024-11-25T17:09:56,597 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/852d10e2e1be4506860a0157792c47d9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/852d10e2e1be4506860a0157792c47d9 2024-11-25T17:09:56,598 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/edc9eb206ee6419fa0e609a11841a16b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/edc9eb206ee6419fa0e609a11841a16b 2024-11-25T17:09:56,599 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/07b8aaaa506348eca299b0a4811c070d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/07b8aaaa506348eca299b0a4811c070d 2024-11-25T17:09:56,610 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6ee16fe2e83e48e4907f821ce3155305 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6ee16fe2e83e48e4907f821ce3155305 2024-11-25T17:09:56,615 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7319b11a66564e57b8bc645e1ac3054d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7319b11a66564e57b8bc645e1ac3054d 2024-11-25T17:09:56,616 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7b0e58b5d90047a192c4e764a8c7ee7a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7b0e58b5d90047a192c4e764a8c7ee7a 2024-11-25T17:09:56,617 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/059a4c2ce186425cb2840430adaec899 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/059a4c2ce186425cb2840430adaec899 2024-11-25T17:09:56,622 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9d0707c6caca47018de83b06f16a7de0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5b9b20c8ab614e72a53416a89fc8443c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/895a159d483743bd91583650a132fb01, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/323bd4352f634bcf8e09e5c52a97d2df, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67a7f0d95f9d40e49abb2796b6de47e2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5f579f7adcc14a01973f3373a7f82b5d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ff63502e3a34444eba5e4861b058062f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/12f4f9d785ce47cb9d28c2e98f22494f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/b990763645a242e1a279b61dbf5b4de7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/3da56ba77d1642d29cefe8ffbe7d6fc9] to archive 2024-11-25T17:09:56,624 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T17:09:56,626 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9d0707c6caca47018de83b06f16a7de0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9d0707c6caca47018de83b06f16a7de0 2024-11-25T17:09:56,627 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5b9b20c8ab614e72a53416a89fc8443c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5b9b20c8ab614e72a53416a89fc8443c 2024-11-25T17:09:56,628 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/895a159d483743bd91583650a132fb01 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/895a159d483743bd91583650a132fb01 2024-11-25T17:09:56,629 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/323bd4352f634bcf8e09e5c52a97d2df to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/323bd4352f634bcf8e09e5c52a97d2df 2024-11-25T17:09:56,631 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67a7f0d95f9d40e49abb2796b6de47e2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67a7f0d95f9d40e49abb2796b6de47e2 2024-11-25T17:09:56,632 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5f579f7adcc14a01973f3373a7f82b5d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/5f579f7adcc14a01973f3373a7f82b5d 2024-11-25T17:09:56,633 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ff63502e3a34444eba5e4861b058062f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ff63502e3a34444eba5e4861b058062f 2024-11-25T17:09:56,634 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/12f4f9d785ce47cb9d28c2e98f22494f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/12f4f9d785ce47cb9d28c2e98f22494f 2024-11-25T17:09:56,635 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/b990763645a242e1a279b61dbf5b4de7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/b990763645a242e1a279b61dbf5b4de7 2024-11-25T17:09:56,636 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/6579369734b6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/3da56ba77d1642d29cefe8ffbe7d6fc9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/3da56ba77d1642d29cefe8ffbe7d6fc9 2024-11-25T17:09:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742322_1498 (size=12151) 2024-11-25T17:09:56,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/63fb7b2d2f584dd2bc98b3cd8838cfbc 2024-11-25T17:09:56,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e0e303a330bd4d70a69ac19dbb5ad34c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e0e303a330bd4d70a69ac19dbb5ad34c 2024-11-25T17:09:56,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e0e303a330bd4d70a69ac19dbb5ad34c, entries=200, sequenceid=196, filesize=14.2 K 
2024-11-25T17:09:56,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/51da9c682637435594f8d96e1a4eafac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/51da9c682637435594f8d96e1a4eafac 2024-11-25T17:09:56,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/51da9c682637435594f8d96e1a4eafac, entries=150, sequenceid=196, filesize=11.9 K 2024-11-25T17:09:56,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/63fb7b2d2f584dd2bc98b3cd8838cfbc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/63fb7b2d2f584dd2bc98b3cd8838cfbc 2024-11-25T17:09:56,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/63fb7b2d2f584dd2bc98b3cd8838cfbc, entries=150, sequenceid=196, filesize=11.9 K 2024-11-25T17:09:56,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3c0802cb7cf476d143cab96601b733ab in 599ms, sequenceid=196, compaction requested=true 2024-11-25T17:09:56,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:56,695 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:56,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:56,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:56,695 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:56,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:56,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:56,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:56,695 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:56,696 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:56,696 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:09:56,696 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:56,696 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/a4b9de6250124181b4fcd884aa4808bf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/d91b698f1e6e412c8bf3ff7ab173ddc1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/51da9c682637435594f8d96e1a4eafac] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=33.6 K 2024-11-25T17:09:56,696 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36791 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:56,696 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:09:56,696 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:09:56,697 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f310ddc7b9a840cebad55f67eccde37e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bd4a280e001f4a89bedd202cc59e4784, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e0e303a330bd4d70a69ac19dbb5ad34c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.9 K 2024-11-25T17:09:56,697 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a4b9de6250124181b4fcd884aa4808bf, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732554594568 2024-11-25T17:09:56,697 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f310ddc7b9a840cebad55f67eccde37e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732554594568 2024-11-25T17:09:56,697 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d91b698f1e6e412c8bf3ff7ab173ddc1, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732554594712 2024-11-25T17:09:56,697 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd4a280e001f4a89bedd202cc59e4784, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732554594712 2024-11-25T17:09:56,698 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 51da9c682637435594f8d96e1a4eafac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732554595458 2024-11-25T17:09:56,698 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0e303a330bd4d70a69ac19dbb5ad34c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732554595436 2024-11-25T17:09:56,710 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#421 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:56,710 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/fc36575f1b5b4f9f9ef6c1c93f0a16b1 is 50, key is test_row_0/A:col10/1732554596089/Put/seqid=0 2024-11-25T17:09:56,712 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#422 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:56,712 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/b39d3c1adc4b4142ab8b2288fea35a8e is 50, key is test_row_0/B:col10/1732554596089/Put/seqid=0 2024-11-25T17:09:56,724 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:56,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-25T17:09:56,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:56,725 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-25T17:09:56,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:56,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:56,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:56,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:56,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:56,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:56,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
as already flushing 2024-11-25T17:09:56,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:56,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/8d2b8a2317394b519bebe4b16599d25f is 50, key is test_row_0/A:col10/1732554596114/Put/seqid=0 2024-11-25T17:09:56,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742323_1499 (size=12254) 2024-11-25T17:09:56,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742324_1500 (size=12254) 2024-11-25T17:09:56,799 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/fc36575f1b5b4f9f9ef6c1c93f0a16b1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fc36575f1b5b4f9f9ef6c1c93f0a16b1 2024-11-25T17:09:56,813 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/b39d3c1adc4b4142ab8b2288fea35a8e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b39d3c1adc4b4142ab8b2288fea35a8e 2024-11-25T17:09:56,819 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into fc36575f1b5b4f9f9ef6c1c93f0a16b1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:56,819 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:56,819 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554596695; duration=0sec 2024-11-25T17:09:56,819 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:56,819 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:09:56,819 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:56,823 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:56,823 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:09:56,823 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:56,824 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ce6446f78d8942d6bf06a7990133ecdc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/99558f67e4cd42d08284089b12165fe7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/63fb7b2d2f584dd2bc98b3cd8838cfbc] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=33.6 K 2024-11-25T17:09:56,824 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce6446f78d8942d6bf06a7990133ecdc, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732554594568 2024-11-25T17:09:56,824 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99558f67e4cd42d08284089b12165fe7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732554594712 2024-11-25T17:09:56,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554656817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,829 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63fb7b2d2f584dd2bc98b3cd8838cfbc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732554595458 2024-11-25T17:09:56,836 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into b39d3c1adc4b4142ab8b2288fea35a8e(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:09:56,836 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:56,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742325_1501 (size=12151) 2024-11-25T17:09:56,836 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554596695; duration=0sec 2024-11-25T17:09:56,836 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:56,836 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:09:56,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554656826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554656826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554656826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,839 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/8d2b8a2317394b519bebe4b16599d25f 2024-11-25T17:09:56,849 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#424 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:56,849 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6a334bbb699141ada0d68b2fa1ae9f08 is 50, key is test_row_0/C:col10/1732554596089/Put/seqid=0 2024-11-25T17:09:56,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-25T17:09:56,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/9cbd37c25d0140e1b3152631a07c5737 is 50, key is test_row_0/B:col10/1732554596114/Put/seqid=0 2024-11-25T17:09:56,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742326_1502 (size=12254) 2024-11-25T17:09:56,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742327_1503 (size=12151) 2024-11-25T17:09:56,899 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/9cbd37c25d0140e1b3152631a07c5737 2024-11-25T17:09:56,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 
{event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ddb0c45675ed4cb59388e4f2363b0423 is 50, key is test_row_0/C:col10/1732554596114/Put/seqid=0 2024-11-25T17:09:56,907 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6a334bbb699141ada0d68b2fa1ae9f08 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6a334bbb699141ada0d68b2fa1ae9f08 2024-11-25T17:09:56,912 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 6a334bbb699141ada0d68b2fa1ae9f08(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:56,912 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:56,912 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554596695; duration=0sec 2024-11-25T17:09:56,912 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:56,912 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:09:56,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554656933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554656940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554656945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:56,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554656946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:56,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742328_1504 (size=12151) 2024-11-25T17:09:57,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554657139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554657144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554657150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554657159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-25T17:09:57,380 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ddb0c45675ed4cb59388e4f2363b0423 2024-11-25T17:09:57,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/8d2b8a2317394b519bebe4b16599d25f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/8d2b8a2317394b519bebe4b16599d25f 2024-11-25T17:09:57,390 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/8d2b8a2317394b519bebe4b16599d25f, entries=150, sequenceid=206, filesize=11.9 K 2024-11-25T17:09:57,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/9cbd37c25d0140e1b3152631a07c5737 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9cbd37c25d0140e1b3152631a07c5737 2024-11-25T17:09:57,402 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9cbd37c25d0140e1b3152631a07c5737, entries=150, sequenceid=206, filesize=11.9 K 2024-11-25T17:09:57,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ddb0c45675ed4cb59388e4f2363b0423 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ddb0c45675ed4cb59388e4f2363b0423 2024-11-25T17:09:57,407 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ddb0c45675ed4cb59388e4f2363b0423, entries=150, sequenceid=206, filesize=11.9 K 2024-11-25T17:09:57,408 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 3c0802cb7cf476d143cab96601b733ab in 683ms, sequenceid=206, compaction requested=false 2024-11-25T17:09:57,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:57,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:09:57,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-25T17:09:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-25T17:09:57,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-25T17:09:57,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1600 sec 2024-11-25T17:09:57,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.1650 sec 2024-11-25T17:09:57,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-25T17:09:57,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:57,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:57,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:57,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:57,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:57,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:57,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:57,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/fd79c98c6913486c98f9f53f503344f1 is 50, key is test_row_0/A:col10/1732554597449/Put/seqid=0 2024-11-25T17:09:57,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554657470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554657477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554657478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554657481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742329_1505 (size=14541) 2024-11-25T17:09:57,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/fd79c98c6913486c98f9f53f503344f1 2024-11-25T17:09:57,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/cacf422c0d7745a8990ee82325864071 is 50, key is test_row_0/B:col10/1732554597449/Put/seqid=0 2024-11-25T17:09:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742330_1506 (size=12151) 2024-11-25T17:09:57,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554657590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554657592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554657593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554657796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554657802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554657803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:57,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554657982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:57,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/cacf422c0d7745a8990ee82325864071 2024-11-25T17:09:57,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6674743537de4276a496a39ccf4d2323 is 50, key is test_row_0/C:col10/1732554597449/Put/seqid=0 2024-11-25T17:09:58,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742331_1507 (size=12151) 2024-11-25T17:09:58,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554658104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554658109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554658113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-25T17:09:58,372 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-25T17:09:58,373 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:58,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-25T17:09:58,375 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:58,376 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:58,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:58,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-25T17:09:58,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6674743537de4276a496a39ccf4d2323 2024-11-25T17:09:58,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/fd79c98c6913486c98f9f53f503344f1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fd79c98c6913486c98f9f53f503344f1 2024-11-25T17:09:58,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fd79c98c6913486c98f9f53f503344f1, entries=200, sequenceid=236, filesize=14.2 K 2024-11-25T17:09:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/cacf422c0d7745a8990ee82325864071 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cacf422c0d7745a8990ee82325864071 2024-11-25T17:09:58,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cacf422c0d7745a8990ee82325864071, entries=150, sequenceid=236, filesize=11.9 K 2024-11-25T17:09:58,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6674743537de4276a496a39ccf4d2323 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6674743537de4276a496a39ccf4d2323 2024-11-25T17:09:58,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6674743537de4276a496a39ccf4d2323, entries=150, sequenceid=236, filesize=11.9 K 2024-11-25T17:09:58,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 3c0802cb7cf476d143cab96601b733ab in 1005ms, sequenceid=236, compaction requested=true 2024-11-25T17:09:58,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:58,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:09:58,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:58,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:09:58,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:09:58,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:09:58,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-25T17:09:58,456 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:58,456 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:58,458 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38946 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:58,458 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36556 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:58,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:09:58,459 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:09:58,459 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:58,459 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
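[Editor's note, hedged sketch] The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are thrown back to the writing client, which is expected to back off and retry while the flusher catches up. Below is a minimal client-side sketch of such a retry loop using the standard HBase client API; the row, family "A", and qualifier "col10" are taken from the keys in the log, while the backoff values and retry budget are assumptions and not part of this test's code.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 50;                              // assumed initial backoff
                for (int attempt = 1; attempt <= 10; attempt++) { // assumed retry budget
                    try {
                        table.put(put);                           // rejected while the memstore is over its blocking limit
                        break;
                    } catch (IOException busy) {                  // a RegionTooBusyException may surface directly or wrapped,
                        if (attempt == 10) throw busy;            // depending on client retry settings
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }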
2024-11-25T17:09:58,459 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fc36575f1b5b4f9f9ef6c1c93f0a16b1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/8d2b8a2317394b519bebe4b16599d25f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fd79c98c6913486c98f9f53f503344f1] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=38.0 K 2024-11-25T17:09:58,459 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6a334bbb699141ada0d68b2fa1ae9f08, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ddb0c45675ed4cb59388e4f2363b0423, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6674743537de4276a496a39ccf4d2323] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.7 K 2024-11-25T17:09:58,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc36575f1b5b4f9f9ef6c1c93f0a16b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732554595458 2024-11-25T17:09:58,459 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a334bbb699141ada0d68b2fa1ae9f08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732554595458 2024-11-25T17:09:58,459 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ddb0c45675ed4cb59388e4f2363b0423, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732554596099 2024-11-25T17:09:58,460 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d2b8a2317394b519bebe4b16599d25f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732554596099 2024-11-25T17:09:58,460 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6674743537de4276a496a39ccf4d2323, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554596818 2024-11-25T17:09:58,460 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd79c98c6913486c98f9f53f503344f1, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554596815 2024-11-25T17:09:58,479 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#430 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:58,479 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/884ad72b6d0243698fe1d263a364eb67 is 50, key is test_row_0/C:col10/1732554597449/Put/seqid=0 2024-11-25T17:09:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-25T17:09:58,483 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#431 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:58,484 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/0bb78c3634724240be257d9d641c60a1 is 50, key is test_row_0/A:col10/1732554597449/Put/seqid=0 2024-11-25T17:09:58,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742332_1508 (size=12357) 2024-11-25T17:09:58,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:58,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-25T17:09:58,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:09:58,532 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-25T17:09:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:58,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742333_1509 (size=12357) 2024-11-25T17:09:58,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/29dab1e864fe4e19b397ccf0173ca7fd is 50, key is test_row_0/A:col10/1732554597477/Put/seqid=0 2024-11-25T17:09:58,576 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/0bb78c3634724240be257d9d641c60a1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0bb78c3634724240be257d9d641c60a1 2024-11-25T17:09:58,581 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into 0bb78c3634724240be257d9d641c60a1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
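[Editor's note, hedged sketch] The pid=138/pid=139 procedures above are driven by an explicit flush request from the test client (the earlier "Client=jenkins ... flush TestAcidGuarantees" master entry). The sketch below shows how such a table flush is issued through the public Admin API; the connection setup is generic boilerplate, and only the table name is taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Requests a flush of every region of the table. On this build the master turns
                // the request into a FlushTableProcedure with per-region FlushRegionProcedure
                // subprocedures, matching the pid=138 / ppid=138 entries in the log above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }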
2024-11-25T17:09:58,581 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:58,581 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554598456; duration=0sec 2024-11-25T17:09:58,581 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:09:58,581 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:09:58,581 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:09:58,582 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36556 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:09:58,582 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:09:58,582 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:58,583 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b39d3c1adc4b4142ab8b2288fea35a8e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9cbd37c25d0140e1b3152631a07c5737, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cacf422c0d7745a8990ee82325864071] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.7 K 2024-11-25T17:09:58,583 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b39d3c1adc4b4142ab8b2288fea35a8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732554595458 2024-11-25T17:09:58,583 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cbd37c25d0140e1b3152631a07c5737, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1732554596099 2024-11-25T17:09:58,584 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting cacf422c0d7745a8990ee82325864071, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554596818 2024-11-25T17:09:58,604 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#433 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:09:58,605 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/c5d7d15e3dfb49db904b951e0b661a41 is 50, key is test_row_0/B:col10/1732554597449/Put/seqid=0 2024-11-25T17:09:58,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742334_1510 (size=12151) 2024-11-25T17:09:58,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:58,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:58,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742335_1511 (size=12357) 2024-11-25T17:09:58,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-25T17:09:58,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554658683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554658684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554658691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554658792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554658793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554658793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:58,932 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/884ad72b6d0243698fe1d263a364eb67 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/884ad72b6d0243698fe1d263a364eb67 2024-11-25T17:09:58,939 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 884ad72b6d0243698fe1d263a364eb67(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:09:58,939 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:58,939 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554598456; duration=0sec 2024-11-25T17:09:58,939 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:58,939 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:09:58,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-25T17:09:58,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:58,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554658988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,007 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554659003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,010 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/29dab1e864fe4e19b397ccf0173ca7fd 2024-11-25T17:09:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554659003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554659004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/45aaa3b6d7b74ceca27aa1fd7af27df4 is 50, key is test_row_0/B:col10/1732554597477/Put/seqid=0 2024-11-25T17:09:59,048 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/c5d7d15e3dfb49db904b951e0b661a41 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/c5d7d15e3dfb49db904b951e0b661a41 2024-11-25T17:09:59,056 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into c5d7d15e3dfb49db904b951e0b661a41(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
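[Editor's note, hedged sketch] The blocking threshold behind the "Over memstore limit=512.0 K" rejections is the per-region blocking memstore size, i.e. the configured flush size multiplied by the block multiplier. The test evidently runs with a very small flush size; the values in the sketch below are assumptions chosen only to reproduce a 512 KB limit and are not read from the test configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Blocking limit = flush size * block multiplier.
            // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" in the log
            // (assumed values; the test may set them differently).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288
        }
    }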
2024-11-25T17:09:59,056 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:59,056 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554598456; duration=0sec 2024-11-25T17:09:59,056 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:09:59,056 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:09:59,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742336_1512 (size=12151) 2024-11-25T17:09:59,064 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/45aaa3b6d7b74ceca27aa1fd7af27df4 2024-11-25T17:09:59,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a86fc083c00b48d5b82cf879d8e63e7e is 50, key is test_row_0/C:col10/1732554597477/Put/seqid=0 2024-11-25T17:09:59,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742337_1513 (size=12151) 2024-11-25T17:09:59,122 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a86fc083c00b48d5b82cf879d8e63e7e 2024-11-25T17:09:59,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/29dab1e864fe4e19b397ccf0173ca7fd as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/29dab1e864fe4e19b397ccf0173ca7fd 2024-11-25T17:09:59,132 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/29dab1e864fe4e19b397ccf0173ca7fd, entries=150, sequenceid=246, filesize=11.9 K 2024-11-25T17:09:59,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/45aaa3b6d7b74ceca27aa1fd7af27df4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/45aaa3b6d7b74ceca27aa1fd7af27df4 2024-11-25T17:09:59,138 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/45aaa3b6d7b74ceca27aa1fd7af27df4, entries=150, sequenceid=246, filesize=11.9 K 2024-11-25T17:09:59,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a86fc083c00b48d5b82cf879d8e63e7e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a86fc083c00b48d5b82cf879d8e63e7e 2024-11-25T17:09:59,141 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a86fc083c00b48d5b82cf879d8e63e7e, entries=150, sequenceid=246, filesize=11.9 K 2024-11-25T17:09:59,142 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 3c0802cb7cf476d143cab96601b733ab in 610ms, sequenceid=246, compaction requested=false 2024-11-25T17:09:59,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:09:59,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
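[Editor's note, hedged sketch] The minor compactions logged above are selected by ExploringCompactionPolicy over three eligible store files ("16 blocking") and throttled by the pressure-aware throughput controller ("total limit is 50.00 MB/second"). The sketch below lists the usual knobs that govern this behavior; the values are illustrative, chosen to mirror the figures in the log rather than taken from the test configuration, and the mapping of the 50 MB/s figure to the throughput bound key is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum / maximum number of store files considered for one minor compaction.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Store-file count at which further flushes are blocked ("16 blocking" in the log).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            // Upper bound used by the pressure-aware compaction throughput controller
            // (illustrative value matching "total limit is 50.00 MB/second" in the log).
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
            System.out.println("compaction.min = " + conf.get("hbase.hstore.compaction.min"));
        }
    }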
2024-11-25T17:09:59,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-25T17:09:59,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-25T17:09:59,144 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-25T17:09:59,144 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 767 msec 2024-11-25T17:09:59,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 772 msec 2024-11-25T17:09:59,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:09:59,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-25T17:09:59,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:09:59,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:59,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:09:59,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:59,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:09:59,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:09:59,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554659327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554659329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554659330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/44a30f6e18cb4faf98a6ab6ca32d3715 is 50, key is test_row_0/A:col10/1732554599313/Put/seqid=0 2024-11-25T17:09:59,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742338_1514 (size=12301) 2024-11-25T17:09:59,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/44a30f6e18cb4faf98a6ab6ca32d3715 2024-11-25T17:09:59,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/bee4138823444c55bb16c913c0efec4d is 50, key is test_row_0/B:col10/1732554599313/Put/seqid=0 2024-11-25T17:09:59,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554659433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554659433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554659434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742339_1515 (size=12301) 2024-11-25T17:09:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-25T17:09:59,484 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-25T17:09:59,485 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:09:59,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-25T17:09:59,489 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:09:59,490 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:09:59,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:09:59,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-25T17:09:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-25T17:09:59,642 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
6579369734b6,41865,1732554474464 2024-11-25T17:09:59,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:09:59,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:59,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:59,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:59,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:59,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
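The entries above show the test client asking the master for another flush of TestAcidGuarantees (pid=140), the master storing a FlushTableProcedure and dispatching a FlushRegionProcedure (pid=141) to the region server, and the region server refusing it because the MemStoreFlusher flush started at 17:09:59,317 is still running. A minimal sketch of how such a flush is requested through the public admin API follows; the table name is taken from the log, the connection setup is assumed, and the actual test harness may drive this differently.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs this
      // as a FlushTableProcedure with one FlushRegionProcedure per region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

As the log shows, HBaseAdmin wraps the returned procedure id in a TableFuture and keeps polling the master ("Checking to see if procedure is done pid=140") until the procedure completes.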
2024-11-25T17:09:59,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554659639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554659640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
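The RegionTooBusyException warnings are the region server rejecting client puts while the region's memstore sits over its 512 KB blocking limit; the writes go through once the flush in progress drains the memstore. The stock HBase client normally treats this as a retriable error on its own, so the test writers mostly just see added latency. A stand-alone sketch of handling it explicitly with bounded backoff is below; the row, family, and qualifier names come from the log, while the value and the retry policy itself are illustrative only.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);   // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) throw busy;   // give up after a few attempts
          Thread.sleep(backoffMs);        // wait for the in-progress flush to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}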
2024-11-25T17:09:59,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554659641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-25T17:09:59,797 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:59,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:09:59,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:59,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:59,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:09:59,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:59,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:59,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:59,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/bee4138823444c55bb16c913c0efec4d 2024-11-25T17:09:59,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/9bc59d1d291b49c3865d5952791e5d04 is 50, key is test_row_0/C:col10/1732554599313/Put/seqid=0 2024-11-25T17:09:59,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742340_1516 (size=12301) 2024-11-25T17:09:59,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554659945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554659946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:09:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554659946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:09:59,951 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:09:59,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:09:59,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:59,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:09:59,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:09:59,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:59,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:09:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-25T17:10:00,104 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:00,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:00,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:00,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
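The 512 KB figure in the "Over memstore limit" messages is the per-region blocking threshold, which HBase derives as the memstore flush size multiplied by the block multiplier; the test presumably runs with a much smaller flush size than the production default. A sketch of the two settings involved, with illustrative values chosen to reproduce a 512 KB limit rather than whatever this test actually configures:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: a 128 KB flush size with the usual multiplier of 4
    // gives the 512 KB blocking limit seen in the RegionTooBusyException messages.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("per-region blocking limit = " + blockingLimit + " bytes"); // 524288
  }
}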
2024-11-25T17:10:00,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:00,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:00,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:00,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
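While the region stays blocked and the master keeps re-dispatching pid=141, one way to watch the memstore drain is through the region metrics exposed by the admin API. This is a sketch under the assumption that the cluster from the log is reachable with default client configuration; the server and region names it prints would correspond to the 6579369734b6,41865,... entries above.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MemstoreWatch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (ServerName server : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        for (RegionMetrics region : admin.getRegionMetrics(server, table)) {
          // Per-region memstore size; the flush above drops this back under the blocking limit.
          System.out.printf("%s on %s: %.1f KB in memstore%n",
              region.getNameAsString(), server,
              region.getMemStoreSize().get(Size.Unit.KILOBYTE));
        }
      }
    }
  }
}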
2024-11-25T17:10:00,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/9bc59d1d291b49c3865d5952791e5d04 2024-11-25T17:10:00,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/44a30f6e18cb4faf98a6ab6ca32d3715 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/44a30f6e18cb4faf98a6ab6ca32d3715 2024-11-25T17:10:00,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/44a30f6e18cb4faf98a6ab6ca32d3715, entries=150, sequenceid=276, filesize=12.0 K 2024-11-25T17:10:00,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/bee4138823444c55bb16c913c0efec4d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/bee4138823444c55bb16c913c0efec4d 2024-11-25T17:10:00,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/bee4138823444c55bb16c913c0efec4d, entries=150, sequenceid=276, filesize=12.0 K 2024-11-25T17:10:00,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/9bc59d1d291b49c3865d5952791e5d04 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9bc59d1d291b49c3865d5952791e5d04 2024-11-25T17:10:00,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9bc59d1d291b49c3865d5952791e5d04, entries=150, sequenceid=276, filesize=12.0 K 2024-11-25T17:10:00,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3c0802cb7cf476d143cab96601b733ab in 1009ms, sequenceid=276, compaction requested=true 2024-11-25T17:10:00,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:00,326 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:00,327 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files 
of size 36809 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:00,327 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:10:00,327 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,327 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0bb78c3634724240be257d9d641c60a1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/29dab1e864fe4e19b397ccf0173ca7fd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/44a30f6e18cb4faf98a6ab6ca32d3715] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.9 K 2024-11-25T17:10:00,328 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bb78c3634724240be257d9d641c60a1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554596818 2024-11-25T17:10:00,328 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29dab1e864fe4e19b397ccf0173ca7fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732554597465 2024-11-25T17:10:00,328 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44a30f6e18cb4faf98a6ab6ca32d3715, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554598682 2024-11-25T17:10:00,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:00,331 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:00,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:00,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:00,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:00,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; 
Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:00,334 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36809 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:00,335 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:10:00,335 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,335 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/c5d7d15e3dfb49db904b951e0b661a41, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/45aaa3b6d7b74ceca27aa1fd7af27df4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/bee4138823444c55bb16c913c0efec4d] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.9 K 2024-11-25T17:10:00,335 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting c5d7d15e3dfb49db904b951e0b661a41, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554596818 2024-11-25T17:10:00,336 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 45aaa3b6d7b74ceca27aa1fd7af27df4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732554597465 2024-11-25T17:10:00,336 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting bee4138823444c55bb16c913c0efec4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554598682 2024-11-25T17:10:00,340 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#439 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:00,341 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/f7ee925c8f874027b1b62c7e520989bb is 50, key is test_row_0/A:col10/1732554599313/Put/seqid=0 2024-11-25T17:10:00,345 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#440 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:00,346 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/e1ce42f9f4c44b48834083051e29ce74 is 50, key is test_row_0/B:col10/1732554599313/Put/seqid=0 2024-11-25T17:10:00,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742341_1517 (size=12609) 2024-11-25T17:10:00,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:10:00,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:00,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:00,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:00,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:00,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:00,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:00,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:00,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742342_1518 (size=12609) 2024-11-25T17:10:00,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/5e3e00b2df26449995cbe228e0ab990e is 50, key is test_row_0/A:col10/1732554599326/Put/seqid=0 2024-11-25T17:10:00,390 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/e1ce42f9f4c44b48834083051e29ce74 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/e1ce42f9f4c44b48834083051e29ce74 2024-11-25T17:10:00,404 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into e1ce42f9f4c44b48834083051e29ce74(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:00,404 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:00,404 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554600331; duration=0sec 2024-11-25T17:10:00,404 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:00,405 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:10:00,405 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:00,415 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36809 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:00,415 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:10:00,415 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,415 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/884ad72b6d0243698fe1d263a364eb67, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a86fc083c00b48d5b82cf879d8e63e7e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9bc59d1d291b49c3865d5952791e5d04] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=35.9 K 2024-11-25T17:10:00,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:00,418 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
as already flushing 2024-11-25T17:10:00,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,419 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 884ad72b6d0243698fe1d263a364eb67, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732554596818 2024-11-25T17:10:00,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:00,419 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a86fc083c00b48d5b82cf879d8e63e7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1732554597465 2024-11-25T17:10:00,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:00,420 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bc59d1d291b49c3865d5952791e5d04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554598682 2024-11-25T17:10:00,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742343_1519 (size=14737) 2024-11-25T17:10:00,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/5e3e00b2df26449995cbe228e0ab990e 2024-11-25T17:10:00,454 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#442 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:00,455 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6c982b410059498483d7b280d17b57a1 is 50, key is test_row_0/C:col10/1732554599313/Put/seqid=0 2024-11-25T17:10:00,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/003b2482634b459fa7fca93282f8a368 is 50, key is test_row_0/B:col10/1732554599326/Put/seqid=0 2024-11-25T17:10:00,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742344_1520 (size=12609) 2024-11-25T17:10:00,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742345_1521 (size=9857) 2024-11-25T17:10:00,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/003b2482634b459fa7fca93282f8a368 2024-11-25T17:10:00,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554660507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554660508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554660514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/39b9f686546347b7a8b7f3f0d346e401 is 50, key is test_row_0/C:col10/1732554599326/Put/seqid=0 2024-11-25T17:10:00,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554660524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:00,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742346_1522 (size=9857) 2024-11-25T17:10:00,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:00,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:00,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:00,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/39b9f686546347b7a8b7f3f0d346e401 2024-11-25T17:10:00,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-25T17:10:00,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/5e3e00b2df26449995cbe228e0ab990e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/5e3e00b2df26449995cbe228e0ab990e 2024-11-25T17:10:00,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/5e3e00b2df26449995cbe228e0ab990e, entries=200, sequenceid=287, filesize=14.4 K 2024-11-25T17:10:00,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/003b2482634b459fa7fca93282f8a368 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/003b2482634b459fa7fca93282f8a368 2024-11-25T17:10:00,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/003b2482634b459fa7fca93282f8a368, entries=100, sequenceid=287, filesize=9.6 K 2024-11-25T17:10:00,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/39b9f686546347b7a8b7f3f0d346e401 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/39b9f686546347b7a8b7f3f0d346e401 2024-11-25T17:10:00,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/39b9f686546347b7a8b7f3f0d346e401, entries=100, sequenceid=287, filesize=9.6 K 2024-11-25T17:10:00,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3c0802cb7cf476d143cab96601b733ab in 283ms, sequenceid=287, compaction requested=false 2024-11-25T17:10:00,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:00,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:00,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:10:00,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:00,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:00,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:00,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:00,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:00,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:00,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/a9aa92db24874235b89677443a24d436 is 50, key is test_row_0/A:col10/1732554600644/Put/seqid=0 2024-11-25T17:10:00,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554660665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554660670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554660674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554660690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742347_1523 (size=12301) 2024-11-25T17:10:00,751 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:00,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:00,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:00,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:00,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,757 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/f7ee925c8f874027b1b62c7e520989bb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f7ee925c8f874027b1b62c7e520989bb 2024-11-25T17:10:00,763 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into f7ee925c8f874027b1b62c7e520989bb(size=12.3 K), total size for store is 26.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:00,763 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:00,763 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554600326; duration=0sec 2024-11-25T17:10:00,763 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:00,763 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:10:00,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554660788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554660794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554660798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:00,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554660822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:00,906 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:00,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:00,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:00,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:00,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:00,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:00,916 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6c982b410059498483d7b280d17b57a1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6c982b410059498483d7b280d17b57a1 2024-11-25T17:10:00,927 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 6c982b410059498483d7b280d17b57a1(size=12.3 K), total size for store is 21.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:00,927 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:00,927 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554600332; duration=0sec 2024-11-25T17:10:00,928 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:00,928 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:10:01,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554660999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554661006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554661008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554661009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,017 DEBUG [Thread-2043 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., hostname=6579369734b6,41865,1732554474464, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:10:01,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554661037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,061 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:01,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:01,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:01,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/a9aa92db24874235b89677443a24d436 2024-11-25T17:10:01,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/48db6144c0294fe0b0699fdfc067b12c is 50, key is test_row_0/B:col10/1732554600644/Put/seqid=0 2024-11-25T17:10:01,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742348_1524 (size=12301) 2024-11-25T17:10:01,214 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:01,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:01,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:01,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554661314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554661318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554661318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554661349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,369 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:01,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:01,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:01,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:01,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:01,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:01,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:01,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
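
[Editor's note] The repeating pattern above — "Executing remote procedure ... FlushRegionCallable, pid=141", then "NOT flushing ... as already flushing", then "Unable to complete flush", then "Remote procedure failed, pid=141" — is the master's flush procedure colliding with a flush that MemStoreFlusher.0 already owns for this region. The callable refuses to start a second concurrent flush, reports failure, and the master re-dispatches the procedure until the in-progress flush finishes. A minimal sketch of that guard, with hypothetical names (the real logic lives in HRegion/FlushRegionCallable):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Simplified illustration of the "already flushing" guard seen in the log.
    class RegionFlushGuard {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        /** Runs the flush unless one is already in progress for this region. */
        void tryFlush(Runnable doFlush) throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                // Another thread (here: MemStoreFlusher.0) owns the flush already,
                // so the remote procedure fails fast and the master retries later.
                throw new IOException("Unable to complete flush: already flushing");
            }
            try {
                doFlush.run();
            } finally {
                flushing.set(false);
            }
        }
    }
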
2024-11-25T17:10:01,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-25T17:10:01,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/48db6144c0294fe0b0699fdfc067b12c 2024-11-25T17:10:01,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/65b7fe1f639949608e6b7be891101762 is 50, key is test_row_0/C:col10/1732554600644/Put/seqid=0 2024-11-25T17:10:01,677 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:01,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:01,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:01,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:01,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742349_1525 (size=12301) 2024-11-25T17:10:01,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/65b7fe1f639949608e6b7be891101762 2024-11-25T17:10:01,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/a9aa92db24874235b89677443a24d436 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/a9aa92db24874235b89677443a24d436 2024-11-25T17:10:01,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/a9aa92db24874235b89677443a24d436, entries=150, sequenceid=316, filesize=12.0 K 2024-11-25T17:10:01,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/48db6144c0294fe0b0699fdfc067b12c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48db6144c0294fe0b0699fdfc067b12c 2024-11-25T17:10:01,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48db6144c0294fe0b0699fdfc067b12c, entries=150, sequenceid=316, filesize=12.0 K 2024-11-25T17:10:01,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/65b7fe1f639949608e6b7be891101762 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/65b7fe1f639949608e6b7be891101762 2024-11-25T17:10:01,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/65b7fe1f639949608e6b7be891101762, entries=150, sequenceid=316, filesize=12.0 K 2024-11-25T17:10:01,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 3c0802cb7cf476d143cab96601b733ab in 1117ms, sequenceid=316, compaction requested=true 2024-11-25T17:10:01,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:01,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:01,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:01,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:01,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:01,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:01,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:10:01,762 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:01,763 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:01,767 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39647 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:01,767 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:10:01,767 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
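
[Editor's note] The flush commit sequence above (HRegionFileSystem(442) "Committing .tmp/... as ...", then HStore "Added ..., entries=150, sequenceid=316") writes each family's snapshot to a file under the region's .tmp directory and then moves it into the column-family directory, so readers only ever observe complete HFiles. Once A, B and C are all committed, the region logs "Finished flush ... compaction requested=true" and queues a compaction per store. A minimal sketch of the commit step, assuming the Hadoop FileSystem API and a hypothetical helper name; paths follow the log:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class FlushCommitSketch {
        // Move a flushed HFile from the region's .tmp dir into the family dir.
        static Path commitFlushFile(Configuration conf, Path tmpFile, Path familyDir)
                throws IOException {
            FileSystem fs = tmpFile.getFileSystem(conf);
            Path dest = new Path(familyDir, tmpFile.getName());
            // The rename is the "commit": the store sees the whole file or nothing.
            if (!fs.rename(tmpFile, dest)) {
                throw new IOException("Failed to commit " + tmpFile + " as " + dest);
            }
            return dest;
        }
    }
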
2024-11-25T17:10:01,767 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f7ee925c8f874027b1b62c7e520989bb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/5e3e00b2df26449995cbe228e0ab990e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/a9aa92db24874235b89677443a24d436] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=38.7 K 2024-11-25T17:10:01,767 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:01,767 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:10:01,767 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,767 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/e1ce42f9f4c44b48834083051e29ce74, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/003b2482634b459fa7fca93282f8a368, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48db6144c0294fe0b0699fdfc067b12c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=34.0 K 2024-11-25T17:10:01,768 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7ee925c8f874027b1b62c7e520989bb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554598682 2024-11-25T17:10:01,768 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ce42f9f4c44b48834083051e29ce74, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554598682 2024-11-25T17:10:01,768 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e3e00b2df26449995cbe228e0ab990e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732554599326 2024-11-25T17:10:01,771 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 003b2482634b459fa7fca93282f8a368, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732554599326 2024-11-25T17:10:01,771 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting a9aa92db24874235b89677443a24d436, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732554600521 2024-11-25T17:10:01,771 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 48db6144c0294fe0b0699fdfc067b12c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732554600521 2024-11-25T17:10:01,806 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:01,807 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/74e0db895016422a90a56cd52a42fcc7 is 50, key is test_row_0/B:col10/1732554600644/Put/seqid=0 2024-11-25T17:10:01,819 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:01,820 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/849f47db064b464ca656a1c8501ace67 is 50, key is test_row_0/A:col10/1732554600644/Put/seqid=0 2024-11-25T17:10:01,833 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:01,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:10:01,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:01,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:01,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:01,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:01,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:01,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:01,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:01,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:01,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:01,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:01,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:01,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
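
[Editor's note] The ExploringCompactionPolicy(116) lines at 17:10:01,767 above ("selected 3 files of size 39647 ... with 1 in ratio") reflect the ratio-based selection: a candidate window of store files is acceptable roughly when no single file is larger than hbase.hstore.compaction.ratio (default 1.2) times the combined size of the others. A simplified check, not the real class:

    import java.util.List;

    class CompactionRatioCheck {
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false; // one file dominates the window; skip this permutation
                }
            }
            return true;
        }
    }

    // For the A-store selection above (~12.3 K, ~14.4 K, ~12.0 K), no file exceeds
    // 1.2x the sum of the other two, so all three files compact together (39647 bytes).
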
2024-11-25T17:10:01,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
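
[Editor's note] On the client side, these procedures (and the earlier "Checking to see if procedure is done pid=140") correspond to an admin-initiated flush: a flush request starts a master procedure, the master dispatches FlushRegionCallable to the region server, and the caller polls until the procedure is reported done. A hedged guess at what the test's flusher does, using the public Admin API; only the table name is taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTrigger {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Typically blocks until the flush procedure completes; while the region
                // is already flushing, the master keeps retrying the remote call, which
                // is exactly the pid=141 churn visible in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
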
2024-11-25T17:10:01,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742350_1526 (size=12711) 2024-11-25T17:10:01,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9e582432168e45df8abf991c1907f44f is 50, key is test_row_0/A:col10/1732554601833/Put/seqid=0 2024-11-25T17:10:01,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742351_1527 (size=12711) 2024-11-25T17:10:01,900 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/849f47db064b464ca656a1c8501ace67 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/849f47db064b464ca656a1c8501ace67 2024-11-25T17:10:01,907 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into 849f47db064b464ca656a1c8501ace67(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:01,907 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:01,907 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554601762; duration=0sec 2024-11-25T17:10:01,907 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:01,907 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:10:01,907 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:01,908 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34767 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:01,908 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:10:01,908 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
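
[Editor's note] The compaction itself rewrites the three selected HFiles into a single new file under .tmp and commits it the same way a flush does ("Completed compaction of 3 (all) file(s) in .../A ... into 849f47db...(size=12.4 K)"). Its write rate is throttled by PressureAwareThroughputController(145) (6.55 MB/s observed against a 50.00 MB/s total limit here) so background rewrites do not starve foreground I/O. A toy version of that throttle, purely illustrative:

    // The compactor would call control(bytesJustWritten) as it copies cells, sleeping
    // whenever the running rate exceeds the configured limit.
    class ThroughputThrottle {
        private final double limitBytesPerSec;
        private final long startNanos = System.nanoTime();
        private long bytesSoFar = 0;

        ThroughputThrottle(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        void control(long bytesJustWritten) throws InterruptedException {
            bytesSoFar += bytesJustWritten;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double minSecNeeded = bytesSoFar / limitBytesPerSec;
            if (minSecNeeded > elapsedSec) {
                // Ahead of the allowed rate; sleep off the difference.
                Thread.sleep((long) ((minSecNeeded - elapsedSec) * 1000));
            }
        }
    }
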
2024-11-25T17:10:01,909 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6c982b410059498483d7b280d17b57a1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/39b9f686546347b7a8b7f3f0d346e401, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/65b7fe1f639949608e6b7be891101762] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=34.0 K 2024-11-25T17:10:01,909 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c982b410059498483d7b280d17b57a1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732554598682 2024-11-25T17:10:01,909 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39b9f686546347b7a8b7f3f0d346e401, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732554599326 2024-11-25T17:10:01,910 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65b7fe1f639949608e6b7be891101762, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732554600521 2024-11-25T17:10:01,919 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#451 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:01,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742352_1528 (size=19621) 2024-11-25T17:10:01,920 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2ce8f00324ad451490e756a6e036c408 is 50, key is test_row_0/C:col10/1732554600644/Put/seqid=0 2024-11-25T17:10:01,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742353_1529 (size=12711) 2024-11-25T17:10:01,948 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2ce8f00324ad451490e756a6e036c408 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2ce8f00324ad451490e756a6e036c408 2024-11-25T17:10:01,958 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 2ce8f00324ad451490e756a6e036c408(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:01,958 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:01,958 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554601762; duration=0sec 2024-11-25T17:10:01,958 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:01,958 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:10:01,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554661943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554661943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554661946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:01,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:01,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554661947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,001 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:02,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:02,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:02,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554662063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554662066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554662067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554662077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,163 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:02,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
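
[Editor's note] The RegionTooBusyException warnings throughout this stretch come from HRegion.checkResources: while the region's memstore is over its blocking limit (512.0 K in this test run), new mutations are rejected until the flush frees memory, at which point writes succeed again. The exception is retryable and the standard HBase client already backs off and retries internally; the sketch below only shows what explicit handling around a raw Table.put() could look like, with row/family/qualifier taken from the log and everything else assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (RegionTooBusyException e) {
                        // Memstore over its limit; depending on client retry settings the
                        // exception may arrive wrapped rather than directly like this.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5000L);
                    }
                }
            }
        }
    }
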
2024-11-25T17:10:02,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554662277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,290 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/74e0db895016422a90a56cd52a42fcc7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/74e0db895016422a90a56cd52a42fcc7 2024-11-25T17:10:02,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554662286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554662286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,296 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into 74e0db895016422a90a56cd52a42fcc7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:02,296 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:02,296 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554601762; duration=0sec 2024-11-25T17:10:02,296 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:02,296 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:10:02,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554662293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9e582432168e45df8abf991c1907f44f 2024-11-25T17:10:02,318 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:02,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/71bd23e72af4442899cf92710b5f0ac1 is 50, key is test_row_0/B:col10/1732554601833/Put/seqid=0 2024-11-25T17:10:02,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742354_1530 (size=12301) 2024-11-25T17:10:02,479 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:02,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,480 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:02,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554662588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554662593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554662593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:02,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554662610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:02,632 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:02,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:02,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:02,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/71bd23e72af4442899cf92710b5f0ac1 2024-11-25T17:10:02,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/8b06d78687b643baa98f08143d180262 is 50, key is test_row_0/C:col10/1732554601833/Put/seqid=0 2024-11-25T17:10:02,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742355_1531 (size=12301) 2024-11-25T17:10:02,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:02,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:02,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:02,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,951 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:02,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:02,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:02,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:02,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:02,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:03,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554663098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:03,106 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:03,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:03,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:03,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:03,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554663101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:03,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554663101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:03,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554663123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:03,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/8b06d78687b643baa98f08143d180262 2024-11-25T17:10:03,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9e582432168e45df8abf991c1907f44f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9e582432168e45df8abf991c1907f44f 2024-11-25T17:10:03,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9e582432168e45df8abf991c1907f44f, entries=300, sequenceid=330, filesize=19.2 K 2024-11-25T17:10:03,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/71bd23e72af4442899cf92710b5f0ac1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71bd23e72af4442899cf92710b5f0ac1 2024-11-25T17:10:03,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71bd23e72af4442899cf92710b5f0ac1, entries=150, sequenceid=330, filesize=12.0 K 2024-11-25T17:10:03,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/8b06d78687b643baa98f08143d180262 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/8b06d78687b643baa98f08143d180262 2024-11-25T17:10:03,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/8b06d78687b643baa98f08143d180262, entries=150, sequenceid=330, filesize=12.0 K 2024-11-25T17:10:03,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3c0802cb7cf476d143cab96601b733ab in 1347ms, sequenceid=330, compaction requested=false 2024-11-25T17:10:03,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:03,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:03,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-25T17:10:03,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:03,265 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-25T17:10:03,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:03,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:03,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:03,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:03,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:03,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:03,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3f374274f3824e1db72c9dca7a14167b is 50, key is test_row_0/A:col10/1732554601942/Put/seqid=0 2024-11-25T17:10:03,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742356_1532 (size=12301) 2024-11-25T17:10:03,318 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3f374274f3824e1db72c9dca7a14167b 2024-11-25T17:10:03,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/23d885a7722a4e93a2dec6ccf74bb5c7 is 50, key is test_row_0/B:col10/1732554601942/Put/seqid=0 2024-11-25T17:10:03,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742357_1533 (size=12301) 2024-11-25T17:10:03,369 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=357 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/23d885a7722a4e93a2dec6ccf74bb5c7 2024-11-25T17:10:03,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/1d2ebae810d04784a35bb9cf51b9dd4b is 50, key is test_row_0/C:col10/1732554601942/Put/seqid=0 2024-11-25T17:10:03,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742358_1534 (size=12301) 2024-11-25T17:10:03,447 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/1d2ebae810d04784a35bb9cf51b9dd4b 2024-11-25T17:10:03,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3f374274f3824e1db72c9dca7a14167b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3f374274f3824e1db72c9dca7a14167b 2024-11-25T17:10:03,475 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3f374274f3824e1db72c9dca7a14167b, entries=150, sequenceid=357, filesize=12.0 K 2024-11-25T17:10:03,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/23d885a7722a4e93a2dec6ccf74bb5c7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/23d885a7722a4e93a2dec6ccf74bb5c7 2024-11-25T17:10:03,502 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/23d885a7722a4e93a2dec6ccf74bb5c7, entries=150, sequenceid=357, filesize=12.0 K 2024-11-25T17:10:03,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/1d2ebae810d04784a35bb9cf51b9dd4b as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/1d2ebae810d04784a35bb9cf51b9dd4b 2024-11-25T17:10:03,536 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/1d2ebae810d04784a35bb9cf51b9dd4b, entries=150, sequenceid=357, filesize=12.0 K 2024-11-25T17:10:03,537 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 3c0802cb7cf476d143cab96601b733ab in 271ms, sequenceid=357, compaction requested=true 2024-11-25T17:10:03,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:03,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:03,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-25T17:10:03,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-25T17:10:03,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-25T17:10:03,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.0510 sec 2024-11-25T17:10:03,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 4.0580 sec 2024-11-25T17:10:03,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-25T17:10:03,614 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-25T17:10:03,617 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:03,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-25T17:10:03,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-25T17:10:03,619 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:03,620 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:03,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:03,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-25T17:10:03,772 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:03,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-25T17:10:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-25T17:10:03,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-25T17:10:03,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-25T17:10:03,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 157 msec 2024-11-25T17:10:03,779 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 162 msec 2024-11-25T17:10:03,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-25T17:10:03,921 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-25T17:10:03,923 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-25T17:10:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=144 2024-11-25T17:10:03,925 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:03,926 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:03,926 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-25T17:10:04,078 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:04,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-25T17:10:04,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:04,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:04,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-25T17:10:04,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-25T17:10:04,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-25T17:10:04,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 156 msec 2024-11-25T17:10:04,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 160 msec 2024-11-25T17:10:04,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:10:04,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:04,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:04,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:04,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:04,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:04,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:04,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:04,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/47c140eac9204d6d9fafd25b5037e0a7 is 50, key is test_row_0/A:col10/1732554604148/Put/seqid=0 2024-11-25T17:10:04,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742359_1535 (size=14741) 2024-11-25T17:10:04,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-25T17:10:04,242 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-25T17:10:04,243 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:04,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-25T17:10:04,245 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:04,245 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:04,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-25T17:10:04,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554664239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554664240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554664245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554664251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-25T17:10:04,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554664356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554664357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554664366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554664367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:04,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:04,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:04,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:04,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-25T17:10:04,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:04,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:04,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:04,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554664570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554664570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554664570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554664572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/47c140eac9204d6d9fafd25b5037e0a7 2024-11-25T17:10:04,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/cea0b8dca4564f90ace486e110e19493 is 50, key is test_row_0/B:col10/1732554604148/Put/seqid=0 2024-11-25T17:10:04,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742360_1536 (size=12301) 2024-11-25T17:10:04,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:04,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:04,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
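Note on the pid=147 entries above and below: the master keeps re-dispatching the flush procedure while the region is still mid-flush, HRegion logs "NOT flushing ... as already flushing", and FlushRegionCallable then fails with "Unable to complete flush", which RemoteProcedureResultReporter sends back so the master schedules another attempt. The following is a minimal, hypothetical sketch of that control flow, not the actual HBase source; the Region interface and its methods are invented purely for illustration.

```java
// Illustrative only: a simplified stand-in for the flush-callable behaviour visible
// in the pid=147 entries. Real HBase classes and signatures differ.
import java.io.IOException;

class FlushRegionCallableSketch {
  interface Region {                      // hypothetical accessor interface for the sketch
    boolean isFlushing();
    void flush() throws IOException;
    String getRegionNameAsString();
  }

  /** Mirrors the logged sequence: start -> already flushing -> close -> IOException. */
  void doCall(Region region) throws IOException {
    System.out.println("Starting region operation on " + region.getRegionNameAsString());
    try {
      if (region.isFlushing()) {
        // Corresponds to "NOT flushing ... as already flushing" followed by
        // "Unable to complete flush": the callable fails, so the master retries later.
        throw new IOException("Unable to complete flush " + region.getRegionNameAsString());
      }
      region.flush();
    } finally {
      System.out.println("Closing region operation on " + region.getRegionNameAsString());
    }
  }
}
```

The effect visible in the log is a retry loop between master and region server that only settles once the in-progress flush finishes.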
2024-11-25T17:10:04,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-25T17:10:04,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:04,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:04,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:04,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:04,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:04,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554664883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554664883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554664885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:04,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:04,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554664885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:05,038 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:05,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:05,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:05,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
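The recurring RegionTooBusyException WARNs are region-server back-pressure: HRegion.checkResources rejects incoming mutations while the region's memstore is above its blocking limit, reported here as 512.0 K. That limit is normally the configured memstore flush size multiplied by the block multiplier, so a test can make it very small on purpose. The sketch below shows one way to arrive at a 512 KB limit; the property keys are standard HBase configuration names, but the specific values are assumptions chosen only to be consistent with this log.

```java
// Illustrative only: shrinking the memstore blocking limit so writers hit
// RegionTooBusyException quickly, as in the WARN lines above. Values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitExample {
  public static Configuration tinyFlushConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore at 128 KB instead of the default 128 MB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block updates once the memstore reaches flushSize * multiplier (128 KB * 4 = 512 KB).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```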
2024-11-25T17:10:05,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:05,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50862 deadline: 1732554665037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:05,049 DEBUG [Thread-2043 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8224 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., hostname=6579369734b6,41865,1732554474464, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:10:05,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/cea0b8dca4564f90ace486e110e19493 2024-11-25T17:10:05,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/67f11ab29b944419bbd5fb00e7986203 is 50, key is test_row_0/C:col10/1732554604148/Put/seqid=0 2024-11-25T17:10:05,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742361_1537 (size=12301) 2024-11-25T17:10:05,190 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:05,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:05,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:05,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:05,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:05,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:05,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-25T17:10:05,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:05,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554665390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:05,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:05,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554665391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:05,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554665391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:05,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:05,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554665392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:05,496 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:05,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:05,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:05,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:05,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:05,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/67f11ab29b944419bbd5fb00e7986203 2024-11-25T17:10:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/47c140eac9204d6d9fafd25b5037e0a7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/47c140eac9204d6d9fafd25b5037e0a7 2024-11-25T17:10:05,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/47c140eac9204d6d9fafd25b5037e0a7, entries=200, sequenceid=370, filesize=14.4 K 2024-11-25T17:10:05,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/cea0b8dca4564f90ace486e110e19493 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cea0b8dca4564f90ace486e110e19493 2024-11-25T17:10:05,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cea0b8dca4564f90ace486e110e19493, entries=150, sequenceid=370, filesize=12.0 K 2024-11-25T17:10:05,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/67f11ab29b944419bbd5fb00e7986203 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67f11ab29b944419bbd5fb00e7986203 2024-11-25T17:10:05,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67f11ab29b944419bbd5fb00e7986203, entries=150, sequenceid=370, filesize=12.0 K 2024-11-25T17:10:05,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3c0802cb7cf476d143cab96601b733ab in 1448ms, sequenceid=370, compaction requested=true 2024-11-25T17:10:05,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:05,599 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:05,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:05,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:05,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:05,599 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:05,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:05,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:05,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:05,600 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 59374 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:05,600 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:10:05,600 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:05,600 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/849f47db064b464ca656a1c8501ace67, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9e582432168e45df8abf991c1907f44f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3f374274f3824e1db72c9dca7a14167b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/47c140eac9204d6d9fafd25b5037e0a7] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=58.0 K 2024-11-25T17:10:05,601 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 849f47db064b464ca656a1c8501ace67, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732554600521 2024-11-25T17:10:05,606 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49614 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:05,606 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:10:05,606 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:05,606 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/74e0db895016422a90a56cd52a42fcc7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71bd23e72af4442899cf92710b5f0ac1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/23d885a7722a4e93a2dec6ccf74bb5c7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cea0b8dca4564f90ace486e110e19493] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=48.5 K 2024-11-25T17:10:05,607 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e582432168e45df8abf991c1907f44f, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732554600667 2024-11-25T17:10:05,607 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 74e0db895016422a90a56cd52a42fcc7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732554600521 2024-11-25T17:10:05,608 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f374274f3824e1db72c9dca7a14167b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732554601922 2024-11-25T17:10:05,608 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 71bd23e72af4442899cf92710b5f0ac1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732554601833 2024-11-25T17:10:05,612 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47c140eac9204d6d9fafd25b5037e0a7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732554604118 2024-11-25T17:10:05,612 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 23d885a7722a4e93a2dec6ccf74bb5c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732554601922 2024-11-25T17:10:05,613 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting cea0b8dca4564f90ace486e110e19493, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732554604118 2024-11-25T17:10:05,624 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#460 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:05,624 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#461 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:05,625 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/ac61ff0a0ae4466a8b3a26614a53815a is 50, key is test_row_0/B:col10/1732554604148/Put/seqid=0 2024-11-25T17:10:05,627 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3c617f072cf54a2aa86880be84eafdd4 is 50, key is test_row_0/A:col10/1732554604148/Put/seqid=0 2024-11-25T17:10:05,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742362_1538 (size=12847) 2024-11-25T17:10:05,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742363_1539 (size=12847) 2024-11-25T17:10:05,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:05,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-25T17:10:05,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:05,650 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:10:05,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:05,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:05,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:05,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:05,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:05,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:05,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/bb3201198c844641b600187a92ded3e1 is 50, key is test_row_0/A:col10/1732554604211/Put/seqid=0 2024-11-25T17:10:05,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742364_1540 (size=12301) 2024-11-25T17:10:06,047 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/3c617f072cf54a2aa86880be84eafdd4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3c617f072cf54a2aa86880be84eafdd4 2024-11-25T17:10:06,047 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/ac61ff0a0ae4466a8b3a26614a53815a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/ac61ff0a0ae4466a8b3a26614a53815a 2024-11-25T17:10:06,052 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into ac61ff0a0ae4466a8b3a26614a53815a(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:06,053 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:06,053 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=12, startTime=1732554605599; duration=0sec 2024-11-25T17:10:06,053 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into 3c617f072cf54a2aa86880be84eafdd4(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:06,053 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:06,053 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=12, startTime=1732554605598; duration=0sec 2024-11-25T17:10:06,053 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:06,053 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:10:06,053 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:06,053 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:10:06,053 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:06,055 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49614 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:06,055 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:10:06,055 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:06,055 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2ce8f00324ad451490e756a6e036c408, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/8b06d78687b643baa98f08143d180262, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/1d2ebae810d04784a35bb9cf51b9dd4b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67f11ab29b944419bbd5fb00e7986203] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=48.5 K 2024-11-25T17:10:06,056 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ce8f00324ad451490e756a6e036c408, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732554600521 2024-11-25T17:10:06,056 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b06d78687b643baa98f08143d180262, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732554601833 2024-11-25T17:10:06,056 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d2ebae810d04784a35bb9cf51b9dd4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732554601922 2024-11-25T17:10:06,057 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 67f11ab29b944419bbd5fb00e7986203, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732554604118 2024-11-25T17:10:06,079 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/bb3201198c844641b600187a92ded3e1 2024-11-25T17:10:06,084 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#463 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:06,084 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/7d00b34468b841ee8376b47f8d96cf79 is 50, key is test_row_0/C:col10/1732554604148/Put/seqid=0 2024-11-25T17:10:06,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6519633562094b448eaa350d0158f581 is 50, key is test_row_0/B:col10/1732554604211/Put/seqid=0 2024-11-25T17:10:06,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742366_1542 (size=12301) 2024-11-25T17:10:06,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742365_1541 (size=12847) 2024-11-25T17:10:06,130 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/7d00b34468b841ee8376b47f8d96cf79 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7d00b34468b841ee8376b47f8d96cf79 2024-11-25T17:10:06,140 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 7d00b34468b841ee8376b47f8d96cf79(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:06,140 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:06,140 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=12, startTime=1732554605599; duration=0sec 2024-11-25T17:10:06,140 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:06,140 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:10:06,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-25T17:10:06,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
as already flushing 2024-11-25T17:10:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:06,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554666407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554666410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554666413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554666415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554666512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554666512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554666518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554666519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,524 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6519633562094b448eaa350d0158f581 2024-11-25T17:10:06,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/45fd27d1a9764d709c2da3c1bc028b2d is 50, key is test_row_0/C:col10/1732554604211/Put/seqid=0 2024-11-25T17:10:06,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742367_1543 (size=12301) 2024-11-25T17:10:06,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554666717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554666717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554666725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:06,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554666725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:06,951 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/45fd27d1a9764d709c2da3c1bc028b2d 2024-11-25T17:10:06,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/bb3201198c844641b600187a92ded3e1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bb3201198c844641b600187a92ded3e1 2024-11-25T17:10:06,962 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bb3201198c844641b600187a92ded3e1, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:10:06,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6519633562094b448eaa350d0158f581 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6519633562094b448eaa350d0158f581 2024-11-25T17:10:06,969 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6519633562094b448eaa350d0158f581, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:10:06,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/45fd27d1a9764d709c2da3c1bc028b2d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/45fd27d1a9764d709c2da3c1bc028b2d 2024-11-25T17:10:06,973 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/45fd27d1a9764d709c2da3c1bc028b2d, entries=150, sequenceid=395, filesize=12.0 K 2024-11-25T17:10:06,974 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 3c0802cb7cf476d143cab96601b733ab in 1324ms, sequenceid=395, compaction requested=false 2024-11-25T17:10:06,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:06,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:06,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-25T17:10:06,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-25T17:10:06,985 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-25T17:10:06,985 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7370 sec 2024-11-25T17:10:06,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 2.7420 sec 2024-11-25T17:10:07,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-25T17:10:07,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:07,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:07,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:07,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:07,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:07,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-25T17:10:07,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:07,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e020a376f0c84761af6f4d10c6440047 is 50, key is test_row_0/A:col10/1732554606405/Put/seqid=0 2024-11-25T17:10:07,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742368_1544 (size=12301) 2024-11-25T17:10:07,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554667078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554667080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554667081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554667082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554667186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554667188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554667188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554667192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554667393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554667394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554667395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554667396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e020a376f0c84761af6f4d10c6440047 2024-11-25T17:10:07,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/9858e2d19ffd44ef81337b00857478ba is 50, key is test_row_0/B:col10/1732554606405/Put/seqid=0 2024-11-25T17:10:07,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742369_1545 (size=12301) 2024-11-25T17:10:07,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/9858e2d19ffd44ef81337b00857478ba 2024-11-25T17:10:07,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/af11991af1194c71b6313aefb8c433ce is 50, key is test_row_0/C:col10/1732554606405/Put/seqid=0 2024-11-25T17:10:07,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742370_1546 (size=12301) 2024-11-25T17:10:07,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/af11991af1194c71b6313aefb8c433ce 2024-11-25T17:10:07,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/e020a376f0c84761af6f4d10c6440047 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e020a376f0c84761af6f4d10c6440047 2024-11-25T17:10:07,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e020a376f0c84761af6f4d10c6440047, entries=150, sequenceid=411, filesize=12.0 K 2024-11-25T17:10:07,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/9858e2d19ffd44ef81337b00857478ba as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9858e2d19ffd44ef81337b00857478ba 2024-11-25T17:10:07,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9858e2d19ffd44ef81337b00857478ba, entries=150, sequenceid=411, filesize=12.0 K 2024-11-25T17:10:07,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/af11991af1194c71b6313aefb8c433ce as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/af11991af1194c71b6313aefb8c433ce 2024-11-25T17:10:07,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/af11991af1194c71b6313aefb8c433ce, entries=150, sequenceid=411, filesize=12.0 K 2024-11-25T17:10:07,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3c0802cb7cf476d143cab96601b733ab in 503ms, sequenceid=411, compaction requested=true 2024-11-25T17:10:07,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:07,535 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:07,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:07,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:07,535 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:07,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-25T17:10:07,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:07,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:07,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:07,536 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:07,536 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:10:07,536 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:07,536 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/ac61ff0a0ae4466a8b3a26614a53815a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6519633562094b448eaa350d0158f581, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9858e2d19ffd44ef81337b00857478ba] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=36.6 K 2024-11-25T17:10:07,537 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:07,537 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:10:07,537 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ac61ff0a0ae4466a8b3a26614a53815a, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732554604118 2024-11-25T17:10:07,537 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
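The recurring RegionTooBusyException entries in this stretch of the log are the region server rejecting Mutate calls in HRegion.checkResources() because the region's memstore has passed its blocking threshold (reported here as 512.0 K) faster than the in-flight flushes can drain it. In a stock deployment that threshold is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the very small limit seen here suggests the test deliberately lowers the flush size to exercise this path. A minimal sketch of the two settings involved, with illustrative values rather than whatever this test actually configures:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Per-region memstore size that triggers a flush (128 MB is the usual default).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // checkResources() blocks writes with RegionTooBusyException once the memstore
        // reaches flush.size * block.multiplier, until a flush brings it back under the limit.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Updates blocked above ~" + blockingLimit + " bytes per region");
    }
}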
2024-11-25T17:10:07,537 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3c617f072cf54a2aa86880be84eafdd4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bb3201198c844641b600187a92ded3e1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e020a376f0c84761af6f4d10c6440047] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=36.6 K 2024-11-25T17:10:07,537 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6519633562094b448eaa350d0158f581, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554604211 2024-11-25T17:10:07,537 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c617f072cf54a2aa86880be84eafdd4, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732554604118 2024-11-25T17:10:07,538 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9858e2d19ffd44ef81337b00857478ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554606405 2024-11-25T17:10:07,538 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb3201198c844641b600187a92ded3e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554604211 2024-11-25T17:10:07,539 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e020a376f0c84761af6f4d10c6440047, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554606405 2024-11-25T17:10:07,551 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:07,552 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/243c549489c4426a9f7b512194738936 is 50, key is test_row_0/B:col10/1732554606405/Put/seqid=0 2024-11-25T17:10:07,554 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#470 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:07,555 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/d3ab4150f6894af09477534c2a14672c is 50, key is test_row_0/A:col10/1732554606405/Put/seqid=0 2024-11-25T17:10:07,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742371_1547 (size=12949) 2024-11-25T17:10:07,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742372_1548 (size=12949) 2024-11-25T17:10:07,577 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/d3ab4150f6894af09477534c2a14672c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/d3ab4150f6894af09477534c2a14672c 2024-11-25T17:10:07,586 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into d3ab4150f6894af09477534c2a14672c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:07,586 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:07,586 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554607535; duration=0sec 2024-11-25T17:10:07,586 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:07,586 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:10:07,586 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:07,588 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:07,592 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:10:07,592 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
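The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines show the ExploringCompactionPolicy picking up the three freshly flushed HFiles per store as soon as enough of them accumulate; the "16 blocking" figure appears to correspond to the store-file count at which further flushes are held back. A hedged sketch of the knobs that govern this selection, using stock defaults rather than values confirmed from this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum number of eligible files before a minor compaction is considered
        // (default 3, matching the "3 eligible" selections above).
        conf.setInt("hbase.hstore.compaction.min", 3);

        // Upper bound on the number of files merged in a single minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Once a store holds this many files, flushes are delayed until compaction
        // catches up (default 16, the "16 blocking" figure in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", 3)
                + ", blockingStoreFiles=" + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
    }
}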
2024-11-25T17:10:07,592 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7d00b34468b841ee8376b47f8d96cf79, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/45fd27d1a9764d709c2da3c1bc028b2d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/af11991af1194c71b6313aefb8c433ce] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=36.6 K 2024-11-25T17:10:07,592 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d00b34468b841ee8376b47f8d96cf79, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732554604118 2024-11-25T17:10:07,593 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45fd27d1a9764d709c2da3c1bc028b2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732554604211 2024-11-25T17:10:07,593 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting af11991af1194c71b6313aefb8c433ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554606405 2024-11-25T17:10:07,623 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:07,624 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/7e4402c40f1f4954ac919253f5fc3d44 is 50, key is test_row_0/C:col10/1732554606405/Put/seqid=0 2024-11-25T17:10:07,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742373_1549 (size=12949) 2024-11-25T17:10:07,677 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/7e4402c40f1f4954ac919253f5fc3d44 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7e4402c40f1f4954ac919253f5fc3d44 2024-11-25T17:10:07,681 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 7e4402c40f1f4954ac919253f5fc3d44(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
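From the client side, each rejected Mutate above surfaces as a RegionTooBusyException, which the HBase client normally retries internally with backoff before giving up (and may then rethrow it wrapped in a retries-exhausted exception, depending on hbase.client.retries.number and the operation timeouts). The following is only a rough sketch of an explicit retry loop around a single put against the table from this log, with a made-up value, attempt limit, and backoff policy:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));

            int attempts = 0;
            long backoffMs = 100L;
            while (true) {
                try {
                    table.put(put);           // rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    if (++attempts >= 5) {
                        throw e;              // give up after a few attempts
                    }
                    Thread.sleep(backoffMs);  // simple exponential backoff before retrying
                    backoffMs *= 2;
                }
            }
        }
    }
}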
2024-11-25T17:10:07,681 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:07,681 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554607535; duration=0sec 2024-11-25T17:10:07,681 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:07,681 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:10:07,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:07,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:10:07,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:07,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/0ecb196534d34e85951fd9a821f56ad8 is 50, key is test_row_0/A:col10/1732554607074/Put/seqid=0 2024-11-25T17:10:07,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742374_1550 (size=17181) 2024-11-25T17:10:07,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/0ecb196534d34e85951fd9a821f56ad8 2024-11-25T17:10:07,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6f9b603a68d1442b8ac784e8ca3e5c99 is 50, key is test_row_0/B:col10/1732554607074/Put/seqid=0 2024-11-25T17:10:07,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554667765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554667769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554667770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554667771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742375_1551 (size=12301) 2024-11-25T17:10:07,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554667871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554667886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554667886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:07,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554667886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:07,983 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/243c549489c4426a9f7b512194738936 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/243c549489c4426a9f7b512194738936 2024-11-25T17:10:07,988 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into 243c549489c4426a9f7b512194738936(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:07,988 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:07,988 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554607535; duration=0sec 2024-11-25T17:10:07,988 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:07,988 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:10:08,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554668080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554668102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554668102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554668104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6f9b603a68d1442b8ac784e8ca3e5c99 2024-11-25T17:10:08,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5 is 50, key is test_row_0/C:col10/1732554607074/Put/seqid=0 2024-11-25T17:10:08,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742376_1552 (size=12301) 2024-11-25T17:10:08,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5 2024-11-25T17:10:08,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/0ecb196534d34e85951fd9a821f56ad8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0ecb196534d34e85951fd9a821f56ad8 2024-11-25T17:10:08,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0ecb196534d34e85951fd9a821f56ad8, entries=250, sequenceid=438, filesize=16.8 K 2024-11-25T17:10:08,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/6f9b603a68d1442b8ac784e8ca3e5c99 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6f9b603a68d1442b8ac784e8ca3e5c99 2024-11-25T17:10:08,307 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6f9b603a68d1442b8ac784e8ca3e5c99, entries=150, sequenceid=438, filesize=12.0 K 2024-11-25T17:10:08,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5 2024-11-25T17:10:08,316 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5, entries=150, sequenceid=438, filesize=12.0 K 2024-11-25T17:10:08,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 3c0802cb7cf476d143cab96601b733ab in 609ms, sequenceid=438, compaction requested=false 2024-11-25T17:10:08,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-25T17:10:08,363 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-25T17:10:08,377 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:08,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-25T17:10:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-25T17:10:08,382 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:08,383 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:08,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:08,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-25T17:10:08,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:08,414 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:08,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:08,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:08,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:08,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:08,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4abf4c8ac9c641c2a41a9630884059e0 is 50, key is test_row_0/A:col10/1732554607767/Put/seqid=0 2024-11-25T17:10:08,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742377_1553 (size=14741) 2024-11-25T17:10:08,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-25T17:10:08,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:08,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-25T17:10:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:08,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554668524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554668525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554668537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554668538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554668642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554668642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554668661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554668662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-25T17:10:08,693 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:08,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-25T17:10:08,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:08,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:08,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,849 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:08,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-25T17:10:08,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:08,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:08,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:08,850 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:08,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4abf4c8ac9c641c2a41a9630884059e0 2024-11-25T17:10:08,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554668853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554668855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/71e96a7ee3fc45cabc401f3b4c55dd78 is 50, key is test_row_0/B:col10/1732554607767/Put/seqid=0 2024-11-25T17:10:08,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554668874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:08,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554668874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:08,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742378_1554 (size=12301) 2024-11-25T17:10:08,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-25T17:10:09,002 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:09,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-25T17:10:09,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:09,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:09,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
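The 512.0 K figure in these messages is the region's blocking memstore size, which HBase generally derives as the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; while that limit is exceeded and a flush is still in progress, the flush procedure (pid=149) keeps being re-dispatched and new puts keep being rejected. The sketch below shows one configuration that would yield a 512 K blocking limit; the concrete values (128 KB flush size, multiplier 4) are assumptions chosen to match the logged limit, not settings read from this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch of settings that would produce the 512 K blocking limit seen in the
 * log: blocking size = memstore flush size * block multiplier.
 * The concrete values are illustrative assumptions.
 */
public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4 x flush size

    long blockingKb = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) / 1024;
    // With these values a region rejects puts with RegionTooBusyException
    // ("Over memstore limit=512.0 K") once its memstore passes 512 KB.
    System.out.println("blocking limit = " + blockingKb + " K");
  }
}
```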
2024-11-25T17:10:09,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,155 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:09,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-25T17:10:09,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:09,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:09,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:09,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554669171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554669171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554669180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554669186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/71e96a7ee3fc45cabc401f3b4c55dd78 2024-11-25T17:10:09,312 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:09,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6bb51c844c0b4c00b9822626ec8b5a6b is 50, key is test_row_0/C:col10/1732554607767/Put/seqid=0 2024-11-25T17:10:09,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-25T17:10:09,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:09,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. as already flushing 2024-11-25T17:10:09,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:09,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:09,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742379_1555 (size=12301) 2024-11-25T17:10:09,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6bb51c844c0b4c00b9822626ec8b5a6b 2024-11-25T17:10:09,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4abf4c8ac9c641c2a41a9630884059e0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4abf4c8ac9c641c2a41a9630884059e0 2024-11-25T17:10:09,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4abf4c8ac9c641c2a41a9630884059e0, entries=200, sequenceid=452, filesize=14.4 K 2024-11-25T17:10:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/71e96a7ee3fc45cabc401f3b4c55dd78 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71e96a7ee3fc45cabc401f3b4c55dd78 2024-11-25T17:10:09,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71e96a7ee3fc45cabc401f3b4c55dd78, entries=150, sequenceid=452, filesize=12.0 K 2024-11-25T17:10:09,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/6bb51c844c0b4c00b9822626ec8b5a6b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6bb51c844c0b4c00b9822626ec8b5a6b 2024-11-25T17:10:09,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6bb51c844c0b4c00b9822626ec8b5a6b, entries=150, sequenceid=452, filesize=12.0 K 2024-11-25T17:10:09,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3c0802cb7cf476d143cab96601b733ab in 977ms, sequenceid=452, compaction requested=true 2024-11-25T17:10:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:09,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:09,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:09,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:09,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:09,392 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:09,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:09,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:10:09,392 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:09,393 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:09,393 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor 
compaction (all files) 2024-11-25T17:10:09,394 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:09,394 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/243c549489c4426a9f7b512194738936, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6f9b603a68d1442b8ac784e8ca3e5c99, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71e96a7ee3fc45cabc401f3b4c55dd78] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=36.7 K 2024-11-25T17:10:09,394 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44871 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:09,394 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:10:09,394 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:09,395 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/d3ab4150f6894af09477534c2a14672c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0ecb196534d34e85951fd9a821f56ad8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4abf4c8ac9c641c2a41a9630884059e0] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=43.8 K 2024-11-25T17:10:09,395 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 243c549489c4426a9f7b512194738936, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554606405 2024-11-25T17:10:09,396 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f9b603a68d1442b8ac784e8ca3e5c99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732554607074 2024-11-25T17:10:09,396 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d3ab4150f6894af09477534c2a14672c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554606405 2024-11-25T17:10:09,397 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71e96a7ee3fc45cabc401f3b4c55dd78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732554607767 2024-11-25T17:10:09,397 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ecb196534d34e85951fd9a821f56ad8, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732554607074 2024-11-25T17:10:09,397 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 4abf4c8ac9c641c2a41a9630884059e0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732554607767 2024-11-25T17:10:09,415 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#478 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:09,416 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/546cb7668d73410dbdd6478b10f9650f is 50, key is test_row_0/A:col10/1732554607767/Put/seqid=0 2024-11-25T17:10:09,423 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#479 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:09,424 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/0a739ba7f8b34feeb38388af3be792d3 is 50, key is test_row_0/B:col10/1732554607767/Put/seqid=0 2024-11-25T17:10:09,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742380_1556 (size=13051) 2024-11-25T17:10:09,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742381_1557 (size=13051) 2024-11-25T17:10:09,478 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/546cb7668d73410dbdd6478b10f9650f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/546cb7668d73410dbdd6478b10f9650f 2024-11-25T17:10:09,480 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:09,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-25T17:10:09,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:09,482 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-25T17:10:09,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:09,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:09,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:09,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:09,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:09,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:09,483 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into 546cb7668d73410dbdd6478b10f9650f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:09,483 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:09,483 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554609392; duration=0sec 2024-11-25T17:10:09,483 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:09,484 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:10:09,484 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:09,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-25T17:10:09,485 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:09,486 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:10:09,486 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
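Note on the compaction entries above: the region server selects these minor compactions on its own (SortedCompactionPolicy finds the eligible store files and ExploringCompactionPolicy picks all three), so the test never has to ask for them. For reference only, a compaction of the same table can also be requested from client code. A minimal sketch, assuming an HBase 2.x client on the classpath and a reachable cluster; the table name is taken from the log, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.compact(table);        // ask the region servers to queue a minor compaction
          // admin.majorCompact(table); // or force a major compaction instead
        }
      }
    }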
2024-11-25T17:10:09,486 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7e4402c40f1f4954ac919253f5fc3d44, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6bb51c844c0b4c00b9822626ec8b5a6b] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=36.7 K 2024-11-25T17:10:09,486 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e4402c40f1f4954ac919253f5fc3d44, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732554606405 2024-11-25T17:10:09,486 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a7a9a7dd9fe94a17bda53ad9c7e71ec5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732554607074 2024-11-25T17:10:09,487 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bb51c844c0b4c00b9822626ec8b5a6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732554607767 2024-11-25T17:10:09,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4b303f368f9e4fb7a275731cf8324765 is 50, key is test_row_0/A:col10/1732554608499/Put/seqid=0 2024-11-25T17:10:09,512 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#481 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:09,512 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/e7433443f859442284f635fb4453c12d is 50, key is test_row_0/C:col10/1732554607767/Put/seqid=0 2024-11-25T17:10:09,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742382_1558 (size=12301) 2024-11-25T17:10:09,574 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4b303f368f9e4fb7a275731cf8324765 2024-11-25T17:10:09,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/0af2c884a917420096742d374942199e is 50, key is test_row_0/B:col10/1732554608499/Put/seqid=0 2024-11-25T17:10:09,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742383_1559 (size=13051) 2024-11-25T17:10:09,617 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/e7433443f859442284f635fb4453c12d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/e7433443f859442284f635fb4453c12d 2024-11-25T17:10:09,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742384_1560 (size=12301) 2024-11-25T17:10:09,630 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into e7433443f859442284f635fb4453c12d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:09,630 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:09,630 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554609392; duration=0sec 2024-11-25T17:10:09,630 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:09,630 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:10:09,633 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/0af2c884a917420096742d374942199e 2024-11-25T17:10:09,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a20fb4be0742459c891f3dc8a06992f8 is 50, key is test_row_0/C:col10/1732554608499/Put/seqid=0 2024-11-25T17:10:09,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742385_1561 (size=12301) 2024-11-25T17:10:09,678 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a20fb4be0742459c891f3dc8a06992f8 2024-11-25T17:10:09,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
as already flushing 2024-11-25T17:10:09,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:09,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/4b303f368f9e4fb7a275731cf8324765 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4b303f368f9e4fb7a275731cf8324765 2024-11-25T17:10:09,693 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4b303f368f9e4fb7a275731cf8324765, entries=150, sequenceid=475, filesize=12.0 K 2024-11-25T17:10:09,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/0af2c884a917420096742d374942199e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0af2c884a917420096742d374942199e 2024-11-25T17:10:09,702 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0af2c884a917420096742d374942199e, entries=150, sequenceid=475, filesize=12.0 K 2024-11-25T17:10:09,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a20fb4be0742459c891f3dc8a06992f8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a20fb4be0742459c891f3dc8a06992f8 2024-11-25T17:10:09,706 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a20fb4be0742459c891f3dc8a06992f8, entries=150, sequenceid=475, filesize=12.0 K 2024-11-25T17:10:09,714 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=20.13 KB/20610 for 3c0802cb7cf476d143cab96601b733ab in 232ms, sequenceid=475, compaction requested=false 2024-11-25T17:10:09,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for 
3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:09,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:09,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-25T17:10:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-25T17:10:09,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-25T17:10:09,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3320 sec 2024-11-25T17:10:09,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.3400 sec 2024-11-25T17:10:09,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:09,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:10:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:09,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:09,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/b4cac5d3844545c382a46418d52f49de is 50, key is test_row_0/A:col10/1732554609736/Put/seqid=0 2024-11-25T17:10:09,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742386_1562 (size=14741) 2024-11-25T17:10:09,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554669845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554669848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554669850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554669862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,886 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/0a739ba7f8b34feeb38388af3be792d3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0a739ba7f8b34feeb38388af3be792d3 2024-11-25T17:10:09,896 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into 0a739ba7f8b34feeb38388af3be792d3(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:09,896 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:09,896 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554609392; duration=0sec 2024-11-25T17:10:09,896 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:09,896 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:10:09,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554669972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554669973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554669981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:09,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554669984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554670186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554670186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554670194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554670194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/b4cac5d3844545c382a46418d52f49de 2024-11-25T17:10:10,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/1293a21f4b304cc59e922c0f3e59d059 is 50, key is test_row_0/B:col10/1732554609736/Put/seqid=0 2024-11-25T17:10:10,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742387_1563 (size=12301) 2024-11-25T17:10:10,273 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/1293a21f4b304cc59e922c0f3e59d059 2024-11-25T17:10:10,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2993b560c8ca4aa2b369030cfd25be97 is 50, key is test_row_0/C:col10/1732554609736/Put/seqid=0 2024-11-25T17:10:10,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742388_1564 (size=12301) 2024-11-25T17:10:10,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2993b560c8ca4aa2b369030cfd25be97 2024-11-25T17:10:10,356 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/b4cac5d3844545c382a46418d52f49de as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b4cac5d3844545c382a46418d52f49de 2024-11-25T17:10:10,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b4cac5d3844545c382a46418d52f49de, entries=200, sequenceid=488, filesize=14.4 K 2024-11-25T17:10:10,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/1293a21f4b304cc59e922c0f3e59d059 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/1293a21f4b304cc59e922c0f3e59d059 2024-11-25T17:10:10,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/1293a21f4b304cc59e922c0f3e59d059, entries=150, sequenceid=488, filesize=12.0 K 2024-11-25T17:10:10,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2993b560c8ca4aa2b369030cfd25be97 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2993b560c8ca4aa2b369030cfd25be97 2024-11-25T17:10:10,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2993b560c8ca4aa2b369030cfd25be97, entries=150, sequenceid=488, filesize=12.0 K 2024-11-25T17:10:10,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3c0802cb7cf476d143cab96601b733ab in 642ms, sequenceid=488, compaction requested=true 2024-11-25T17:10:10,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:10,400 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:10,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:10,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:10,401 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store 
files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:10,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:10,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:10,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:10,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:10,401 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:10,402 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:10:10,402 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:10,402 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/546cb7668d73410dbdd6478b10f9650f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4b303f368f9e4fb7a275731cf8324765, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b4cac5d3844545c382a46418d52f49de] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=39.2 K 2024-11-25T17:10:10,402 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:10,402 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:10:10,402 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
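Note on the RegionTooBusyException warnings above: HRegion.checkResources rejects new mutations once the region's memstore passes its blocking limit (here 512.0 K) until the in-flight flushes drain it, and the stock HBase client normally absorbs these rejections by retrying internally. The sketch below only makes that retry-with-backoff visible from application code. It assumes an HBase 2.x client, that the exception reaches the caller directly (in practice it may arrive wrapped in a retries-exhausted exception), and a backoff policy chosen purely for illustration; the row, family, and qualifier names are taken from the log:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class PutWithBackoff {
      static void putWithBackoff(Table table) throws Exception {
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        long sleepMs = 100L;
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;                               // write accepted
          } catch (RegionTooBusyException busy) { // region over its memstore blocking limit
            Thread.sleep(sleepMs);                // back off while the flush catches up
            sleepMs *= 2;
          }
        }
        throw new java.io.IOException("region still too busy after retries");
      }
    }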
2024-11-25T17:10:10,402 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0a739ba7f8b34feeb38388af3be792d3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0af2c884a917420096742d374942199e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/1293a21f4b304cc59e922c0f3e59d059] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=36.8 K 2024-11-25T17:10:10,402 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 546cb7668d73410dbdd6478b10f9650f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732554607767 2024-11-25T17:10:10,403 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a739ba7f8b34feeb38388af3be792d3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732554607767 2024-11-25T17:10:10,403 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b303f368f9e4fb7a275731cf8324765, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732554608499 2024-11-25T17:10:10,403 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0af2c884a917420096742d374942199e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732554608499 2024-11-25T17:10:10,403 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4cac5d3844545c382a46418d52f49de, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732554609736 2024-11-25T17:10:10,403 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1293a21f4b304cc59e922c0f3e59d059, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732554609736 2024-11-25T17:10:10,412 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:10,413 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7793c446dc204f75b08dc73e724ea069 is 50, key is test_row_0/B:col10/1732554609736/Put/seqid=0 2024-11-25T17:10:10,422 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#488 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:10,422 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/95d505d975b94732add7e341a3a6f50d is 50, key is test_row_0/A:col10/1732554609736/Put/seqid=0 2024-11-25T17:10:10,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742389_1565 (size=13153) 2024-11-25T17:10:10,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742390_1566 (size=13153) 2024-11-25T17:10:10,448 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/7793c446dc204f75b08dc73e724ea069 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7793c446dc204f75b08dc73e724ea069 2024-11-25T17:10:10,452 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/95d505d975b94732add7e341a3a6f50d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/95d505d975b94732add7e341a3a6f50d 2024-11-25T17:10:10,461 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into 7793c446dc204f75b08dc73e724ea069(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:10,461 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:10,461 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=13, startTime=1732554610401; duration=0sec 2024-11-25T17:10:10,461 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:10,461 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:10:10,461 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:10,463 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into 95d505d975b94732add7e341a3a6f50d(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:10,463 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:10,463 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=13, startTime=1732554610400; duration=0sec 2024-11-25T17:10:10,463 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:10,463 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:10:10,463 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:10,463 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:10:10,463 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:10,463 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/e7433443f859442284f635fb4453c12d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a20fb4be0742459c891f3dc8a06992f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2993b560c8ca4aa2b369030cfd25be97] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=36.8 K 2024-11-25T17:10:10,464 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e7433443f859442284f635fb4453c12d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732554607767 2024-11-25T17:10:10,464 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a20fb4be0742459c891f3dc8a06992f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732554608499 2024-11-25T17:10:10,464 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2993b560c8ca4aa2b369030cfd25be97, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732554609736 2024-11-25T17:10:10,481 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#489 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:10,481 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/7edadb880c994615a4fd43668fd1226c is 50, key is test_row_0/C:col10/1732554609736/Put/seqid=0 2024-11-25T17:10:10,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-25T17:10:10,488 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-25T17:10:10,490 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-11-25T17:10:10,491 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-25T17:10:10,495 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:10,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:10,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:10,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:10:10,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:10,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:10,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:10,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:10,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:10,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:10,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742391_1567 (size=13153) 
2024-11-25T17:10:10,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/acc58f8471c04922a0c39ea098bdbe9d is 50, key is test_row_0/A:col10/1732554610493/Put/seqid=0 2024-11-25T17:10:10,512 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/7edadb880c994615a4fd43668fd1226c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7edadb880c994615a4fd43668fd1226c 2024-11-25T17:10:10,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554670517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,522 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into 7edadb880c994615a4fd43668fd1226c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:10,522 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:10,522 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=13, startTime=1732554610401; duration=0sec 2024-11-25T17:10:10,522 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:10,522 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:10:10,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554670518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554670520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554670522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742392_1568 (size=14741) 2024-11-25T17:10:10,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/acc58f8471c04922a0c39ea098bdbe9d 2024-11-25T17:10:10,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/3362ff3426c5435990ce0c1c6e425553 is 50, key is test_row_0/B:col10/1732554610493/Put/seqid=0 2024-11-25T17:10:10,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742393_1569 (size=12301) 2024-11-25T17:10:10,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/3362ff3426c5435990ce0c1c6e425553 2024-11-25T17:10:10,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-25T17:10:10,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ac6992545ef54d1c9f3a14ba5feb7187 is 50, key is test_row_0/C:col10/1732554610493/Put/seqid=0 2024-11-25T17:10:10,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742394_1570 (size=12301) 2024-11-25T17:10:10,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ac6992545ef54d1c9f3a14ba5feb7187 2024-11-25T17:10:10,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/acc58f8471c04922a0c39ea098bdbe9d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/acc58f8471c04922a0c39ea098bdbe9d 2024-11-25T17:10:10,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554670623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554670624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554670625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554670630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/acc58f8471c04922a0c39ea098bdbe9d, entries=200, sequenceid=517, filesize=14.4 K 2024-11-25T17:10:10,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/3362ff3426c5435990ce0c1c6e425553 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/3362ff3426c5435990ce0c1c6e425553 2024-11-25T17:10:10,649 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:10,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/3362ff3426c5435990ce0c1c6e425553, entries=150, sequenceid=517, filesize=12.0 K 2024-11-25T17:10:10,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-25T17:10:10,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:10,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
as already flushing 2024-11-25T17:10:10,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:10,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:10,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:10,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:10,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/ac6992545ef54d1c9f3a14ba5feb7187 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ac6992545ef54d1c9f3a14ba5feb7187 2024-11-25T17:10:10,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ac6992545ef54d1c9f3a14ba5feb7187, entries=150, sequenceid=517, filesize=12.0 K 2024-11-25T17:10:10,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3c0802cb7cf476d143cab96601b733ab in 164ms, sequenceid=517, compaction requested=false 2024-11-25T17:10:10,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:10,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-25T17:10:10,803 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:10,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-25T17:10:10,809 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:10,809 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-25T17:10:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:10,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:10,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/6da84ea43c094f9d9980891d8b08bcf2 is 50, key is test_row_0/A:col10/1732554610516/Put/seqid=0 2024-11-25T17:10:10,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
as already flushing 2024-11-25T17:10:10,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:10,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742395_1571 (size=9857) 2024-11-25T17:10:10,849 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/6da84ea43c094f9d9980891d8b08bcf2 2024-11-25T17:10:10,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/fb7ed60cf4c74226beb553eb900079b7 is 50, key is test_row_0/B:col10/1732554610516/Put/seqid=0 2024-11-25T17:10:10,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742396_1572 (size=9857) 2024-11-25T17:10:10,890 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/fb7ed60cf4c74226beb553eb900079b7 2024-11-25T17:10:10,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/c59affd2b8d948f2ba2b695d817eaa91 is 50, key is test_row_0/C:col10/1732554610516/Put/seqid=0 2024-11-25T17:10:10,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554670902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554670904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554670905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554670908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:10,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742397_1573 (size=9857) 2024-11-25T17:10:10,929 DEBUG [Thread-2056 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2070263a to 127.0.0.1:56265 2024-11-25T17:10:10,929 DEBUG [Thread-2056 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:10,930 DEBUG [Thread-2054 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bc486e1 to 127.0.0.1:56265 2024-11-25T17:10:10,930 DEBUG [Thread-2054 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:10,932 DEBUG [Thread-2052 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:56265 2024-11-25T17:10:10,932 DEBUG [Thread-2052 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:10,936 DEBUG [Thread-2058 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6050584c to 127.0.0.1:56265 2024-11-25T17:10:10,936 DEBUG [Thread-2058 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:10,938 DEBUG [Thread-2060 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dd48863 to 127.0.0.1:56265 2024-11-25T17:10:10,938 DEBUG [Thread-2060 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:11,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554671011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554671015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554671016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554671018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-25T17:10:11,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50846 deadline: 1732554671213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50878 deadline: 1732554671216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50884 deadline: 1732554671217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:11,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:50804 deadline: 1732554671219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:11,327 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/c59affd2b8d948f2ba2b695d817eaa91 2024-11-25T17:10:11,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/6da84ea43c094f9d9980891d8b08bcf2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6da84ea43c094f9d9980891d8b08bcf2 2024-11-25T17:10:11,337 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6da84ea43c094f9d9980891d8b08bcf2, entries=100, sequenceid=528, filesize=9.6 K 2024-11-25T17:10:11,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/fb7ed60cf4c74226beb553eb900079b7 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/fb7ed60cf4c74226beb553eb900079b7 2024-11-25T17:10:11,340 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/fb7ed60cf4c74226beb553eb900079b7, entries=100, sequenceid=528, filesize=9.6 K 2024-11-25T17:10:11,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/c59affd2b8d948f2ba2b695d817eaa91 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/c59affd2b8d948f2ba2b695d817eaa91 2024-11-25T17:10:11,343 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/c59affd2b8d948f2ba2b695d817eaa91, entries=100, sequenceid=528, filesize=9.6 K 2024-11-25T17:10:11,344 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 3c0802cb7cf476d143cab96601b733ab in 535ms, sequenceid=528, compaction requested=true 2024-11-25T17:10:11,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:11,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
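The burst of RegionTooBusyException entries above comes from HRegion.checkResources rejecting mutations while this region's memstore is over its blocking limit (the test runs with a small 512.0 K limit); writes resume once the flush at sequenceid=528 drains the memstore. The stock HBase client normally retries this exception on its own, and it may surface to the application only after those retries are exhausted, but an application can also back off explicitly. A minimal sketch, assuming the standard Java client API and the table, row, and column names used by this test (the retry count and backoff values are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;                 // illustrative starting backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);                    // write accepted by the region
              break;
            } catch (RegionTooBusyException busy) {
              // The region is over its memstore blocking limit; give the flush
              // time to drain the memstore before retrying the mutation.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }
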
2024-11-25T17:10:11,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-11-25T17:10:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-11-25T17:10:11,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-25T17:10:11,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 850 msec 2024-11-25T17:10:11,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 856 msec 2024-11-25T17:10:11,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:11,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-25T17:10:11,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:11,516 DEBUG [Thread-2045 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65df2359 to 127.0.0.1:56265 2024-11-25T17:10:11,516 DEBUG [Thread-2045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:11,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:11,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:11,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:11,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:11,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:11,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/6cd0c249192741bc8fe98038ea138504 is 50, key is test_row_0/A:col10/1732554611515/Put/seqid=0 2024-11-25T17:10:11,522 DEBUG [Thread-2041 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x301741f1 to 127.0.0.1:56265 2024-11-25T17:10:11,522 DEBUG [Thread-2041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:11,522 DEBUG [Thread-2049 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5871c039 to 127.0.0.1:56265 2024-11-25T17:10:11,522 DEBUG [Thread-2049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:11,526 DEBUG [Thread-2047 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d0ab200 to 127.0.0.1:56265 2024-11-25T17:10:11,526 DEBUG [Thread-2047 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:11,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33067 is added to blk_1073742398_1574 (size=12301) 2024-11-25T17:10:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-25T17:10:11,597 INFO [Thread-2051 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-25T17:10:11,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/6cd0c249192741bc8fe98038ea138504 2024-11-25T17:10:11,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/5e54604a244b45afb0fb094a12ee76dc is 50, key is test_row_0/B:col10/1732554611515/Put/seqid=0 2024-11-25T17:10:11,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742399_1575 (size=12301) 2024-11-25T17:10:12,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/5e54604a244b45afb0fb094a12ee76dc 2024-11-25T17:10:12,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/040f9c440ff140d0b7f2192e0eb63a71 is 50, key is test_row_0/C:col10/1732554611515/Put/seqid=0 2024-11-25T17:10:12,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742400_1576 (size=12301) 2024-11-25T17:10:12,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/040f9c440ff140d0b7f2192e0eb63a71 2024-11-25T17:10:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/6cd0c249192741bc8fe98038ea138504 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6cd0c249192741bc8fe98038ea138504 2024-11-25T17:10:12,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6cd0c249192741bc8fe98038ea138504, entries=150, sequenceid=555, filesize=12.0 K 2024-11-25T17:10:12,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/5e54604a244b45afb0fb094a12ee76dc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/5e54604a244b45afb0fb094a12ee76dc 2024-11-25T17:10:12,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/5e54604a244b45afb0fb094a12ee76dc, entries=150, sequenceid=555, filesize=12.0 K 2024-11-25T17:10:12,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/040f9c440ff140d0b7f2192e0eb63a71 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/040f9c440ff140d0b7f2192e0eb63a71 2024-11-25T17:10:12,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/040f9c440ff140d0b7f2192e0eb63a71, entries=150, sequenceid=555, filesize=12.0 K 2024-11-25T17:10:12,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=20.13 KB/20610 for 3c0802cb7cf476d143cab96601b733ab in 1264ms, sequenceid=555, compaction requested=true 2024-11-25T17:10:12,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:12,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:12,780 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:12,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:12,780 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:12,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:12,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:12,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3c0802cb7cf476d143cab96601b733ab:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:12,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:12,781 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47612 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:12,782 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/B is initiating minor compaction (all files) 2024-11-25T17:10:12,782 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/B in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:12,782 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7793c446dc204f75b08dc73e724ea069, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/3362ff3426c5435990ce0c1c6e425553, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/fb7ed60cf4c74226beb553eb900079b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/5e54604a244b45afb0fb094a12ee76dc] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=46.5 K 2024-11-25T17:10:12,782 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50052 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:12,782 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/A is initiating minor compaction (all files) 2024-11-25T17:10:12,782 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/A in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
2024-11-25T17:10:12,782 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/95d505d975b94732add7e341a3a6f50d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/acc58f8471c04922a0c39ea098bdbe9d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6da84ea43c094f9d9980891d8b08bcf2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6cd0c249192741bc8fe98038ea138504] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=48.9 K 2024-11-25T17:10:12,782 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7793c446dc204f75b08dc73e724ea069, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732554609736 2024-11-25T17:10:12,783 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95d505d975b94732add7e341a3a6f50d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732554609736 2024-11-25T17:10:12,783 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3362ff3426c5435990ce0c1c6e425553, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732554609844 2024-11-25T17:10:12,783 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting acc58f8471c04922a0c39ea098bdbe9d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732554609844 2024-11-25T17:10:12,783 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting fb7ed60cf4c74226beb553eb900079b7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732554610516 2024-11-25T17:10:12,783 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6da84ea43c094f9d9980891d8b08bcf2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732554610516 2024-11-25T17:10:12,784 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e54604a244b45afb0fb094a12ee76dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732554610900 2024-11-25T17:10:12,784 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cd0c249192741bc8fe98038ea138504, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732554610900 2024-11-25T17:10:12,799 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#B#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:12,800 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/b060aaf7233c4f308eb62ab5b8e42b9a is 50, key is test_row_0/B:col10/1732554611515/Put/seqid=0 2024-11-25T17:10:12,800 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#A#compaction#500 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:12,801 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9168b93c9b294aa891f1f6b0d897db0d is 50, key is test_row_0/A:col10/1732554611515/Put/seqid=0 2024-11-25T17:10:12,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742401_1577 (size=13289) 2024-11-25T17:10:12,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742402_1578 (size=13289) 2024-11-25T17:10:12,847 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9168b93c9b294aa891f1f6b0d897db0d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9168b93c9b294aa891f1f6b0d897db0d 2024-11-25T17:10:12,863 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/A of 3c0802cb7cf476d143cab96601b733ab into 9168b93c9b294aa891f1f6b0d897db0d(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
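The compactions above were selected automatically by the ExploringCompactionPolicy once four store files had accumulated in each of the A, B, and C stores. A compaction can also be requested explicitly through the Admin API; a minimal sketch using the standard client (connection setup as in the earlier example, and note that both calls only queue the request, the region server runs it asynchronously):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask the region servers to schedule a minor compaction for the table.
          admin.compact(table);
          // Or request a major compaction, which rewrites all store files per store.
          admin.majorCompact(table);
        }
      }
    }
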
2024-11-25T17:10:12,863 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:12,863 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/A, priority=12, startTime=1732554612780; duration=0sec 2024-11-25T17:10:12,863 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:12,863 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:A 2024-11-25T17:10:12,863 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:12,864 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47612 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:12,865 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): 3c0802cb7cf476d143cab96601b733ab/C is initiating minor compaction (all files) 2024-11-25T17:10:12,865 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3c0802cb7cf476d143cab96601b733ab/C in TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:12,865 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7edadb880c994615a4fd43668fd1226c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ac6992545ef54d1c9f3a14ba5feb7187, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/c59affd2b8d948f2ba2b695d817eaa91, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/040f9c440ff140d0b7f2192e0eb63a71] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp, totalSize=46.5 K 2024-11-25T17:10:12,865 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7edadb880c994615a4fd43668fd1226c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732554609736 2024-11-25T17:10:12,866 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac6992545ef54d1c9f3a14ba5feb7187, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=517, earliestPutTs=1732554609844 2024-11-25T17:10:12,866 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c59affd2b8d948f2ba2b695d817eaa91, keycount=100, bloomtype=ROW, size=9.6 K, 
encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732554610516 2024-11-25T17:10:12,867 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 040f9c440ff140d0b7f2192e0eb63a71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732554610900 2024-11-25T17:10:12,891 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3c0802cb7cf476d143cab96601b733ab#C#compaction#501 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:12,892 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a2903ef16ed24e4095855bea2ece1ecf is 50, key is test_row_0/C:col10/1732554611515/Put/seqid=0 2024-11-25T17:10:12,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742403_1579 (size=13289) 2024-11-25T17:10:12,943 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/a2903ef16ed24e4095855bea2ece1ecf as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a2903ef16ed24e4095855bea2ece1ecf 2024-11-25T17:10:12,982 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/C of 3c0802cb7cf476d143cab96601b733ab into a2903ef16ed24e4095855bea2ece1ecf(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
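The cycle visible throughout this run is: memstore flush into .tmp files, commit of those files into the A/B/C store directories, then compaction of the accumulated HFiles. Most flushes here are driven by memstore pressure (MemStoreFlusher.0), but the test also forces one through HBaseAdmin, which is what the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed" line reflects. The same on-demand flush can be issued from any client; a minimal sketch, again assuming the standard Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush all memstores of the table to HFiles, as the test tool's
          // FLUSH table operation (procId 150 in the log above) did.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
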
2024-11-25T17:10:12,982 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:12,982 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/C, priority=12, startTime=1732554612780; duration=0sec 2024-11-25T17:10:12,982 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:12,982 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:C 2024-11-25T17:10:13,248 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/b060aaf7233c4f308eb62ab5b8e42b9a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b060aaf7233c4f308eb62ab5b8e42b9a 2024-11-25T17:10:13,254 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3c0802cb7cf476d143cab96601b733ab/B of 3c0802cb7cf476d143cab96601b733ab into b060aaf7233c4f308eb62ab5b8e42b9a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:13,254 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:13,254 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab., storeName=3c0802cb7cf476d143cab96601b733ab/B, priority=12, startTime=1732554612780; duration=0sec 2024-11-25T17:10:13,254 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:13,254 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3c0802cb7cf476d143cab96601b733ab:B 2024-11-25T17:10:15,150 DEBUG [Thread-2043 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63cefe40 to 127.0.0.1:56265 2024-11-25T17:10:15,150 DEBUG [Thread-2043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 98
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 112
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 114
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1603
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4809 rows
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1616
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4848 rows
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1608
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4824 rows
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1628
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4884 rows
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1616
2024-11-25T17:10:15,150 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4848 rows
2024-11-25T17:10:15,150 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-25T17:10:15,150 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cb726fe to 127.0.0.1:56265
2024-11-25T17:10:15,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-25T17:10:15,154 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-25T17:10:15,155 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees
2024-11-25T17:10:15,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-25T17:10:15,160 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554615160"}]},"ts":"1732554615160"}
2024-11-25T17:10:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152
2024-11-25T17:10:15,162 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-25T17:10:15,165 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-25T17:10:15,165 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-25T17:10:15,167 INFO [PEWorker-2 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c0802cb7cf476d143cab96601b733ab, UNASSIGN}] 2024-11-25T17:10:15,168 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c0802cb7cf476d143cab96601b733ab, UNASSIGN 2024-11-25T17:10:15,168 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=3c0802cb7cf476d143cab96601b733ab, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:10:15,170 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:10:15,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure 3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:10:15,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-25T17:10:15,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:15,322 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing 3c0802cb7cf476d143cab96601b733ab, disabling compactions & flushes 2024-11-25T17:10:15,322 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. after waiting 0 ms 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 
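The DisableTableProcedure (pid=152) above fans out into a CloseTableRegionsProcedure (pid=153), a TransitRegionStateProcedure for the region (pid=154), and a CloseRegionProcedure on the region server (pid=155) before the table can be dropped. From a client's point of view this is the usual disable-then-delete sequence; a minimal sketch with the standard Admin API (the existence check and the delete step are illustrative, this excerpt only shows the disable):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableAndDropTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(table)) {
            if (!admin.isTableDisabled(table)) {
              // Triggers the DisableTableProcedure seen in the log (pid=152).
              admin.disableTable(table);
            }
            // A table must be disabled before it can be deleted.
            admin.deleteTable(table);
          }
        }
      }
    }
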
2024-11-25T17:10:15,322 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(2837): Flushing 3c0802cb7cf476d143cab96601b733ab 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=A 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=B 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3c0802cb7cf476d143cab96601b733ab, store=C 2024-11-25T17:10:15,322 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:15,326 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9772d704c7c349318de3c36f0751f3f6 is 50, key is test_row_1/A:col10/1732554611523/Put/seqid=0 2024-11-25T17:10:15,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742404_1580 (size=9857) 2024-11-25T17:10:15,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-25T17:10:15,730 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9772d704c7c349318de3c36f0751f3f6 2024-11-25T17:10:15,747 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/48999c75da4e4592a9a720707d3d3434 is 50, key is test_row_1/B:col10/1732554611523/Put/seqid=0 2024-11-25T17:10:15,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742405_1581 (size=9857) 2024-11-25T17:10:15,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-25T17:10:16,158 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 
{event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/48999c75da4e4592a9a720707d3d3434 2024-11-25T17:10:16,165 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2161326290514d2d8192490b7a7bbcf2 is 50, key is test_row_1/C:col10/1732554611523/Put/seqid=0 2024-11-25T17:10:16,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742406_1582 (size=9857) 2024-11-25T17:10:16,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-25T17:10:16,579 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2161326290514d2d8192490b7a7bbcf2 2024-11-25T17:10:16,582 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/A/9772d704c7c349318de3c36f0751f3f6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9772d704c7c349318de3c36f0751f3f6 2024-11-25T17:10:16,585 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9772d704c7c349318de3c36f0751f3f6, entries=100, sequenceid=565, filesize=9.6 K 2024-11-25T17:10:16,585 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/B/48999c75da4e4592a9a720707d3d3434 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48999c75da4e4592a9a720707d3d3434 2024-11-25T17:10:16,587 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48999c75da4e4592a9a720707d3d3434, entries=100, sequenceid=565, filesize=9.6 K 2024-11-25T17:10:16,588 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/.tmp/C/2161326290514d2d8192490b7a7bbcf2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2161326290514d2d8192490b7a7bbcf2 2024-11-25T17:10:16,590 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2161326290514d2d8192490b7a7bbcf2, entries=100, sequenceid=565, filesize=9.6 K 2024-11-25T17:10:16,591 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 3c0802cb7cf476d143cab96601b733ab in 1268ms, sequenceid=565, compaction requested=false 2024-11-25T17:10:16,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f310ddc7b9a840cebad55f67eccde37e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bd4a280e001f4a89bedd202cc59e4784, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e0e303a330bd4d70a69ac19dbb5ad34c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fc36575f1b5b4f9f9ef6c1c93f0a16b1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/8d2b8a2317394b519bebe4b16599d25f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fd79c98c6913486c98f9f53f503344f1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0bb78c3634724240be257d9d641c60a1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/29dab1e864fe4e19b397ccf0173ca7fd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f7ee925c8f874027b1b62c7e520989bb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/44a30f6e18cb4faf98a6ab6ca32d3715, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/5e3e00b2df26449995cbe228e0ab990e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/849f47db064b464ca656a1c8501ace67, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/a9aa92db24874235b89677443a24d436, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9e582432168e45df8abf991c1907f44f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3f374274f3824e1db72c9dca7a14167b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/47c140eac9204d6d9fafd25b5037e0a7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3c617f072cf54a2aa86880be84eafdd4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bb3201198c844641b600187a92ded3e1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/d3ab4150f6894af09477534c2a14672c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e020a376f0c84761af6f4d10c6440047, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0ecb196534d34e85951fd9a821f56ad8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4abf4c8ac9c641c2a41a9630884059e0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/546cb7668d73410dbdd6478b10f9650f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4b303f368f9e4fb7a275731cf8324765, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b4cac5d3844545c382a46418d52f49de, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/95d505d975b94732add7e341a3a6f50d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/acc58f8471c04922a0c39ea098bdbe9d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6da84ea43c094f9d9980891d8b08bcf2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6cd0c249192741bc8fe98038ea138504] to archive 2024-11-25T17:10:16,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
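During region close the store closer does not delete the compacted-away HFiles listed above; HFileArchiver moves them from the store directory into the cluster's archive directory (.../archive/data/default/TestAcidGuarantees/...), as the following entries show. The archive can be inspected with the ordinary Hadoop FileSystem API; a minimal sketch, assuming the HDFS URI and archive layout shown in this log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListArchivedHFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Archive root used by this mini-cluster run, taken from the log above.
        Path archive = new Path(
            "hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/"
                + "archive/data/default/TestAcidGuarantees");
        try (FileSystem fs = FileSystem.get(URI.create(archive.toString()), conf)) {
          // Recursively list every archived HFile under the table's archive directory.
          RemoteIterator<LocatedFileStatus> it = fs.listFiles(archive, true);
          while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            System.out.println(status.getPath() + "\t" + status.getLen() + " bytes");
          }
        }
      }
    }
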
2024-11-25T17:10:16,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f310ddc7b9a840cebad55f67eccde37e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f310ddc7b9a840cebad55f67eccde37e 2024-11-25T17:10:16,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bd4a280e001f4a89bedd202cc59e4784 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bd4a280e001f4a89bedd202cc59e4784 2024-11-25T17:10:16,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e0e303a330bd4d70a69ac19dbb5ad34c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e0e303a330bd4d70a69ac19dbb5ad34c 2024-11-25T17:10:16,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fc36575f1b5b4f9f9ef6c1c93f0a16b1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fc36575f1b5b4f9f9ef6c1c93f0a16b1 2024-11-25T17:10:16,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/8d2b8a2317394b519bebe4b16599d25f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/8d2b8a2317394b519bebe4b16599d25f 2024-11-25T17:10:16,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fd79c98c6913486c98f9f53f503344f1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/fd79c98c6913486c98f9f53f503344f1 2024-11-25T17:10:16,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0bb78c3634724240be257d9d641c60a1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0bb78c3634724240be257d9d641c60a1 2024-11-25T17:10:16,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/29dab1e864fe4e19b397ccf0173ca7fd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/29dab1e864fe4e19b397ccf0173ca7fd 2024-11-25T17:10:16,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f7ee925c8f874027b1b62c7e520989bb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/f7ee925c8f874027b1b62c7e520989bb 2024-11-25T17:10:16,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/44a30f6e18cb4faf98a6ab6ca32d3715 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/44a30f6e18cb4faf98a6ab6ca32d3715 2024-11-25T17:10:16,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/5e3e00b2df26449995cbe228e0ab990e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/5e3e00b2df26449995cbe228e0ab990e 2024-11-25T17:10:16,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/849f47db064b464ca656a1c8501ace67 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/849f47db064b464ca656a1c8501ace67 2024-11-25T17:10:16,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/a9aa92db24874235b89677443a24d436 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/a9aa92db24874235b89677443a24d436 2024-11-25T17:10:16,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9e582432168e45df8abf991c1907f44f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9e582432168e45df8abf991c1907f44f 2024-11-25T17:10:16,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3f374274f3824e1db72c9dca7a14167b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3f374274f3824e1db72c9dca7a14167b 2024-11-25T17:10:16,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/47c140eac9204d6d9fafd25b5037e0a7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/47c140eac9204d6d9fafd25b5037e0a7 2024-11-25T17:10:16,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3c617f072cf54a2aa86880be84eafdd4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/3c617f072cf54a2aa86880be84eafdd4 2024-11-25T17:10:16,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bb3201198c844641b600187a92ded3e1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/bb3201198c844641b600187a92ded3e1 2024-11-25T17:10:16,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/d3ab4150f6894af09477534c2a14672c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/d3ab4150f6894af09477534c2a14672c 2024-11-25T17:10:16,608 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e020a376f0c84761af6f4d10c6440047 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/e020a376f0c84761af6f4d10c6440047 2024-11-25T17:10:16,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0ecb196534d34e85951fd9a821f56ad8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/0ecb196534d34e85951fd9a821f56ad8 2024-11-25T17:10:16,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4abf4c8ac9c641c2a41a9630884059e0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4abf4c8ac9c641c2a41a9630884059e0 2024-11-25T17:10:16,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/546cb7668d73410dbdd6478b10f9650f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/546cb7668d73410dbdd6478b10f9650f 2024-11-25T17:10:16,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4b303f368f9e4fb7a275731cf8324765 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/4b303f368f9e4fb7a275731cf8324765 2024-11-25T17:10:16,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b4cac5d3844545c382a46418d52f49de to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/b4cac5d3844545c382a46418d52f49de 2024-11-25T17:10:16,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/95d505d975b94732add7e341a3a6f50d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/95d505d975b94732add7e341a3a6f50d 2024-11-25T17:10:16,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/acc58f8471c04922a0c39ea098bdbe9d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/acc58f8471c04922a0c39ea098bdbe9d 2024-11-25T17:10:16,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6da84ea43c094f9d9980891d8b08bcf2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6da84ea43c094f9d9980891d8b08bcf2 2024-11-25T17:10:16,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6cd0c249192741bc8fe98038ea138504 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/6cd0c249192741bc8fe98038ea138504 2024-11-25T17:10:16,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/a4b9de6250124181b4fcd884aa4808bf, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/d91b698f1e6e412c8bf3ff7ab173ddc1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b39d3c1adc4b4142ab8b2288fea35a8e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/51da9c682637435594f8d96e1a4eafac, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9cbd37c25d0140e1b3152631a07c5737, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/c5d7d15e3dfb49db904b951e0b661a41, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cacf422c0d7745a8990ee82325864071, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/45aaa3b6d7b74ceca27aa1fd7af27df4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/e1ce42f9f4c44b48834083051e29ce74, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/bee4138823444c55bb16c913c0efec4d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/003b2482634b459fa7fca93282f8a368, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/74e0db895016422a90a56cd52a42fcc7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48db6144c0294fe0b0699fdfc067b12c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71bd23e72af4442899cf92710b5f0ac1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/23d885a7722a4e93a2dec6ccf74bb5c7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/ac61ff0a0ae4466a8b3a26614a53815a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cea0b8dca4564f90ace486e110e19493, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6519633562094b448eaa350d0158f581, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/243c549489c4426a9f7b512194738936, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9858e2d19ffd44ef81337b00857478ba, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6f9b603a68d1442b8ac784e8ca3e5c99, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0a739ba7f8b34feeb38388af3be792d3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71e96a7ee3fc45cabc401f3b4c55dd78, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0af2c884a917420096742d374942199e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7793c446dc204f75b08dc73e724ea069, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/1293a21f4b304cc59e922c0f3e59d059, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/3362ff3426c5435990ce0c1c6e425553, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/fb7ed60cf4c74226beb553eb900079b7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/5e54604a244b45afb0fb094a12ee76dc] to archive 2024-11-25T17:10:16,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:10:16,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/a4b9de6250124181b4fcd884aa4808bf to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/a4b9de6250124181b4fcd884aa4808bf 2024-11-25T17:10:16,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/d91b698f1e6e412c8bf3ff7ab173ddc1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/d91b698f1e6e412c8bf3ff7ab173ddc1 2024-11-25T17:10:16,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b39d3c1adc4b4142ab8b2288fea35a8e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b39d3c1adc4b4142ab8b2288fea35a8e 2024-11-25T17:10:16,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/51da9c682637435594f8d96e1a4eafac to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/51da9c682637435594f8d96e1a4eafac 2024-11-25T17:10:16,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9cbd37c25d0140e1b3152631a07c5737 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9cbd37c25d0140e1b3152631a07c5737 2024-11-25T17:10:16,620 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/c5d7d15e3dfb49db904b951e0b661a41 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/c5d7d15e3dfb49db904b951e0b661a41 2024-11-25T17:10:16,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cacf422c0d7745a8990ee82325864071 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cacf422c0d7745a8990ee82325864071 2024-11-25T17:10:16,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/45aaa3b6d7b74ceca27aa1fd7af27df4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/45aaa3b6d7b74ceca27aa1fd7af27df4 2024-11-25T17:10:16,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/e1ce42f9f4c44b48834083051e29ce74 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/e1ce42f9f4c44b48834083051e29ce74 2024-11-25T17:10:16,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/bee4138823444c55bb16c913c0efec4d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/bee4138823444c55bb16c913c0efec4d 2024-11-25T17:10:16,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/003b2482634b459fa7fca93282f8a368 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/003b2482634b459fa7fca93282f8a368 2024-11-25T17:10:16,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/74e0db895016422a90a56cd52a42fcc7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/74e0db895016422a90a56cd52a42fcc7 2024-11-25T17:10:16,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48db6144c0294fe0b0699fdfc067b12c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48db6144c0294fe0b0699fdfc067b12c 2024-11-25T17:10:16,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71bd23e72af4442899cf92710b5f0ac1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71bd23e72af4442899cf92710b5f0ac1 2024-11-25T17:10:16,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/23d885a7722a4e93a2dec6ccf74bb5c7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/23d885a7722a4e93a2dec6ccf74bb5c7 2024-11-25T17:10:16,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/ac61ff0a0ae4466a8b3a26614a53815a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/ac61ff0a0ae4466a8b3a26614a53815a 2024-11-25T17:10:16,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cea0b8dca4564f90ace486e110e19493 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/cea0b8dca4564f90ace486e110e19493 2024-11-25T17:10:16,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6519633562094b448eaa350d0158f581 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6519633562094b448eaa350d0158f581 2024-11-25T17:10:16,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/243c549489c4426a9f7b512194738936 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/243c549489c4426a9f7b512194738936 2024-11-25T17:10:16,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9858e2d19ffd44ef81337b00857478ba to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/9858e2d19ffd44ef81337b00857478ba 2024-11-25T17:10:16,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6f9b603a68d1442b8ac784e8ca3e5c99 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/6f9b603a68d1442b8ac784e8ca3e5c99 2024-11-25T17:10:16,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0a739ba7f8b34feeb38388af3be792d3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0a739ba7f8b34feeb38388af3be792d3 2024-11-25T17:10:16,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71e96a7ee3fc45cabc401f3b4c55dd78 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/71e96a7ee3fc45cabc401f3b4c55dd78 2024-11-25T17:10:16,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0af2c884a917420096742d374942199e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/0af2c884a917420096742d374942199e 2024-11-25T17:10:16,634 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7793c446dc204f75b08dc73e724ea069 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/7793c446dc204f75b08dc73e724ea069 2024-11-25T17:10:16,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/1293a21f4b304cc59e922c0f3e59d059 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/1293a21f4b304cc59e922c0f3e59d059 2024-11-25T17:10:16,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/3362ff3426c5435990ce0c1c6e425553 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/3362ff3426c5435990ce0c1c6e425553 2024-11-25T17:10:16,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/fb7ed60cf4c74226beb553eb900079b7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/fb7ed60cf4c74226beb553eb900079b7 2024-11-25T17:10:16,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/5e54604a244b45afb0fb094a12ee76dc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/5e54604a244b45afb0fb094a12ee76dc 2024-11-25T17:10:16,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ce6446f78d8942d6bf06a7990133ecdc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/99558f67e4cd42d08284089b12165fe7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6a334bbb699141ada0d68b2fa1ae9f08, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/63fb7b2d2f584dd2bc98b3cd8838cfbc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ddb0c45675ed4cb59388e4f2363b0423, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/884ad72b6d0243698fe1d263a364eb67, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6674743537de4276a496a39ccf4d2323, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a86fc083c00b48d5b82cf879d8e63e7e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6c982b410059498483d7b280d17b57a1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9bc59d1d291b49c3865d5952791e5d04, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/39b9f686546347b7a8b7f3f0d346e401, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2ce8f00324ad451490e756a6e036c408, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/65b7fe1f639949608e6b7be891101762, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/8b06d78687b643baa98f08143d180262, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/1d2ebae810d04784a35bb9cf51b9dd4b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7d00b34468b841ee8376b47f8d96cf79, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67f11ab29b944419bbd5fb00e7986203, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/45fd27d1a9764d709c2da3c1bc028b2d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7e4402c40f1f4954ac919253f5fc3d44, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/af11991af1194c71b6313aefb8c433ce, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/e7433443f859442284f635fb4453c12d, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6bb51c844c0b4c00b9822626ec8b5a6b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a20fb4be0742459c891f3dc8a06992f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7edadb880c994615a4fd43668fd1226c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2993b560c8ca4aa2b369030cfd25be97, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ac6992545ef54d1c9f3a14ba5feb7187, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/c59affd2b8d948f2ba2b695d817eaa91, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/040f9c440ff140d0b7f2192e0eb63a71] to archive 2024-11-25T17:10:16,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:10:16,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ce6446f78d8942d6bf06a7990133ecdc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ce6446f78d8942d6bf06a7990133ecdc 2024-11-25T17:10:16,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/99558f67e4cd42d08284089b12165fe7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/99558f67e4cd42d08284089b12165fe7 2024-11-25T17:10:16,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6a334bbb699141ada0d68b2fa1ae9f08 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6a334bbb699141ada0d68b2fa1ae9f08 2024-11-25T17:10:16,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/63fb7b2d2f584dd2bc98b3cd8838cfbc to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/63fb7b2d2f584dd2bc98b3cd8838cfbc 2024-11-25T17:10:16,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ddb0c45675ed4cb59388e4f2363b0423 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ddb0c45675ed4cb59388e4f2363b0423 2024-11-25T17:10:16,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/884ad72b6d0243698fe1d263a364eb67 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/884ad72b6d0243698fe1d263a364eb67 2024-11-25T17:10:16,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6674743537de4276a496a39ccf4d2323 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6674743537de4276a496a39ccf4d2323 2024-11-25T17:10:16,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a86fc083c00b48d5b82cf879d8e63e7e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a86fc083c00b48d5b82cf879d8e63e7e 2024-11-25T17:10:16,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6c982b410059498483d7b280d17b57a1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6c982b410059498483d7b280d17b57a1 2024-11-25T17:10:16,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9bc59d1d291b49c3865d5952791e5d04 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/9bc59d1d291b49c3865d5952791e5d04 2024-11-25T17:10:16,650 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/39b9f686546347b7a8b7f3f0d346e401 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/39b9f686546347b7a8b7f3f0d346e401 2024-11-25T17:10:16,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2ce8f00324ad451490e756a6e036c408 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2ce8f00324ad451490e756a6e036c408 2024-11-25T17:10:16,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/65b7fe1f639949608e6b7be891101762 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/65b7fe1f639949608e6b7be891101762 2024-11-25T17:10:16,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/8b06d78687b643baa98f08143d180262 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/8b06d78687b643baa98f08143d180262 2024-11-25T17:10:16,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/1d2ebae810d04784a35bb9cf51b9dd4b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/1d2ebae810d04784a35bb9cf51b9dd4b 2024-11-25T17:10:16,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7d00b34468b841ee8376b47f8d96cf79 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7d00b34468b841ee8376b47f8d96cf79 2024-11-25T17:10:16,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67f11ab29b944419bbd5fb00e7986203 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/67f11ab29b944419bbd5fb00e7986203 2024-11-25T17:10:16,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/45fd27d1a9764d709c2da3c1bc028b2d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/45fd27d1a9764d709c2da3c1bc028b2d 2024-11-25T17:10:16,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7e4402c40f1f4954ac919253f5fc3d44 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7e4402c40f1f4954ac919253f5fc3d44 2024-11-25T17:10:16,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/af11991af1194c71b6313aefb8c433ce to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/af11991af1194c71b6313aefb8c433ce 2024-11-25T17:10:16,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a7a9a7dd9fe94a17bda53ad9c7e71ec5 2024-11-25T17:10:16,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/e7433443f859442284f635fb4453c12d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/e7433443f859442284f635fb4453c12d 2024-11-25T17:10:16,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6bb51c844c0b4c00b9822626ec8b5a6b to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/6bb51c844c0b4c00b9822626ec8b5a6b 2024-11-25T17:10:16,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a20fb4be0742459c891f3dc8a06992f8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a20fb4be0742459c891f3dc8a06992f8 2024-11-25T17:10:16,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7edadb880c994615a4fd43668fd1226c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/7edadb880c994615a4fd43668fd1226c 2024-11-25T17:10:16,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2993b560c8ca4aa2b369030cfd25be97 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2993b560c8ca4aa2b369030cfd25be97 2024-11-25T17:10:16,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ac6992545ef54d1c9f3a14ba5feb7187 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/ac6992545ef54d1c9f3a14ba5feb7187 2024-11-25T17:10:16,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/c59affd2b8d948f2ba2b695d817eaa91 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/c59affd2b8d948f2ba2b695d817eaa91 2024-11-25T17:10:16,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/040f9c440ff140d0b7f2192e0eb63a71 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/040f9c440ff140d0b7f2192e0eb63a71 2024-11-25T17:10:16,670 DEBUG 
[RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/recovered.edits/568.seqid, newMaxSeqId=568, maxSeqId=1 2024-11-25T17:10:16,671 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab. 2024-11-25T17:10:16,671 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for 3c0802cb7cf476d143cab96601b733ab: 2024-11-25T17:10:16,672 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed 3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:16,673 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=3c0802cb7cf476d143cab96601b733ab, regionState=CLOSED 2024-11-25T17:10:16,675 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-25T17:10:16,675 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure 3c0802cb7cf476d143cab96601b733ab, server=6579369734b6,41865,1732554474464 in 1.5030 sec 2024-11-25T17:10:16,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-25T17:10:16,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3c0802cb7cf476d143cab96601b733ab, UNASSIGN in 1.5080 sec 2024-11-25T17:10:16,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-25T17:10:16,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5110 sec 2024-11-25T17:10:16,678 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554616678"}]},"ts":"1732554616678"} 2024-11-25T17:10:16,679 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-25T17:10:16,681 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-25T17:10:16,682 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5270 sec 2024-11-25T17:10:17,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-25T17:10:17,270 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-25T17:10:17,271 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-25T17:10:17,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:17,273 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:17,273 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:17,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-25T17:10:17,276 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:17,282 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/recovered.edits] 2024-11-25T17:10:17,286 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9168b93c9b294aa891f1f6b0d897db0d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9168b93c9b294aa891f1f6b0d897db0d 2024-11-25T17:10:17,288 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9772d704c7c349318de3c36f0751f3f6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/A/9772d704c7c349318de3c36f0751f3f6 2024-11-25T17:10:17,292 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48999c75da4e4592a9a720707d3d3434 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/48999c75da4e4592a9a720707d3d3434 2024-11-25T17:10:17,294 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b060aaf7233c4f308eb62ab5b8e42b9a to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/B/b060aaf7233c4f308eb62ab5b8e42b9a 2024-11-25T17:10:17,309 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2161326290514d2d8192490b7a7bbcf2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/2161326290514d2d8192490b7a7bbcf2 2024-11-25T17:10:17,314 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a2903ef16ed24e4095855bea2ece1ecf to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/C/a2903ef16ed24e4095855bea2ece1ecf 2024-11-25T17:10:17,316 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/recovered.edits/568.seqid to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab/recovered.edits/568.seqid 2024-11-25T17:10:17,317 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/3c0802cb7cf476d143cab96601b733ab 2024-11-25T17:10:17,317 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-25T17:10:17,321 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:17,331 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-25T17:10:17,333 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-25T17:10:17,334 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:17,334 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-25T17:10:17,334 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732554617334"}]},"ts":"9223372036854775807"} 2024-11-25T17:10:17,351 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-25T17:10:17,351 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3c0802cb7cf476d143cab96601b733ab, NAME => 'TestAcidGuarantees,,1732554589728.3c0802cb7cf476d143cab96601b733ab.', STARTKEY => '', ENDKEY => ''}] 2024-11-25T17:10:17,351 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-25T17:10:17,351 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732554617351"}]},"ts":"9223372036854775807"} 2024-11-25T17:10:17,355 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-25T17:10:17,359 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:17,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 89 msec 2024-11-25T17:10:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-25T17:10:17,378 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-11-25T17:10:17,392 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241 (was 241), OpenFileDescriptor=459 (was 463), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=856 (was 691) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1482 (was 3340) 2024-11-25T17:10:17,407 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=856, ProcessCount=11, AvailableMemoryMB=1481 2024-11-25T17:10:17,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
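The entries above trace a table teardown end to end: DisableTableProcedure (procId=152) unassigns the region and marks the table DISABLED in hbase:meta, then DeleteTableProcedure (procId=156) archives the region directory and removes the table's rows and state from hbase:meta. As a minimal sketch (not taken from the test source), the client-side calls that drive such a sequence look like the following; the Configuration is assumed to already point at the mini-cluster, and the class name is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: hbase-site.xml on the classpath points at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.tableExists(table)) {
        // Disable first: regions are closed and the table state becomes DISABLED in hbase:meta.
        admin.disableTable(table);
        // Delete: region directories are archived and the table is removed from hbase:meta.
        admin.deleteTable(table);
      }
    }
  }
}

Both calls block until the corresponding master procedure completes, which is why the client log above reports "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" only after the PEWorker threads finish their subprocedures.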
2024-11-25T17:10:17,410 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:10:17,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:17,412 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T17:10:17,412 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:17,412 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 157 2024-11-25T17:10:17,413 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T17:10:17,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-25T17:10:17,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742407_1583 (size=963) 2024-11-25T17:10:17,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-25T17:10:17,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-25T17:10:17,826 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4 2024-11-25T17:10:17,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742408_1584 (size=53) 2024-11-25T17:10:18,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-25T17:10:18,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:10:18,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c2213e06f0c6c3750162aafa4b26c5ef, disabling compactions & flushes 2024-11-25T17:10:18,232 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:18,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:18,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. after waiting 0 ms 2024-11-25T17:10:18,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:18,232 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
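The create request dumped above builds the table with the table-level metadata key 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three column families A, B and C, each with VERSIONS => '1'. The following is a hedged sketch of how such a descriptor is typically assembled and submitted through the Admin API; it is not the test's own helper code, and the class name and Configuration setup are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: configuration already points at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata seen in the log: ADAPTIVE in-memory compaction for all stores.
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] { "A", "B", "C" }) {
      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1) // VERSIONS => '1' in the descriptor dump above
          .build();
      builder.setColumnFamily(cfd);
    }
    TableDescriptor desc = builder.build();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Drives a CreateTableProcedure like pid=157 in this run.
      admin.createTable(desc);
    }
  }
}

The remaining family attributes printed in the log (BLOOMFILTER, BLOCKSIZE, TTL, and so on) are the builder defaults, which is why they appear even though only VERSIONS is set explicitly here.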
2024-11-25T17:10:18,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:18,233 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T17:10:18,233 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732554618233"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732554618233"}]},"ts":"1732554618233"} 2024-11-25T17:10:18,236 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-25T17:10:18,236 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T17:10:18,236 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554618236"}]},"ts":"1732554618236"} 2024-11-25T17:10:18,237 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-25T17:10:18,242 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, ASSIGN}] 2024-11-25T17:10:18,243 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, ASSIGN 2024-11-25T17:10:18,243 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, ASSIGN; state=OFFLINE, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=false 2024-11-25T17:10:18,394 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:10:18,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; OpenRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:10:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-25T17:10:18,547 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:18,551 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:18,551 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7285): Opening region: {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:10:18,551 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,551 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:10:18,552 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7327): checking encryption for c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,552 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7330): checking classloading for c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,553 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,554 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:10:18,555 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2213e06f0c6c3750162aafa4b26c5ef columnFamilyName A 2024-11-25T17:10:18,555 DEBUG [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:18,556 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(327): Store=c2213e06f0c6c3750162aafa4b26c5ef/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:10:18,556 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,557 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:10:18,557 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2213e06f0c6c3750162aafa4b26c5ef columnFamilyName B 2024-11-25T17:10:18,557 DEBUG [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:18,558 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(327): Store=c2213e06f0c6c3750162aafa4b26c5ef/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:10:18,558 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,560 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:10:18,560 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2213e06f0c6c3750162aafa4b26c5ef columnFamilyName C 2024-11-25T17:10:18,560 DEBUG [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:18,560 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(327): Store=c2213e06f0c6c3750162aafa4b26c5ef/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:10:18,560 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:18,561 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,561 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,563 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:10:18,564 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1085): writing seq id for c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:18,566 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T17:10:18,567 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1102): Opened c2213e06f0c6c3750162aafa4b26c5ef; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65588694, jitterRate=-0.02265229821205139}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:10:18,568 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1001): Region open journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:18,568 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., pid=159, masterSystemTime=1732554618547 2024-11-25T17:10:18,570 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:18,570 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
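The store-open entries above show each family coming up with memstore type=CompactingMemStore and compactor=ADAPTIVE, inherited from the table-level 'hbase.hregion.compacting.memstore.type' attribute. For reference only, the same policy can also be requested per column family through the descriptor builder; this is a hedged sketch of that alternative, not what this test does.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class PerFamilyAdaptiveSketch {
  // Builds a family descriptor with ADAPTIVE in-memory compaction set directly on the
  // family, instead of through the table-level metadata key used in this run.
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}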
2024-11-25T17:10:18,570 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=OPEN, openSeqNum=2, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:10:18,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-25T17:10:18,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; OpenRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 in 176 msec 2024-11-25T17:10:18,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-25T17:10:18,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, ASSIGN in 332 msec 2024-11-25T17:10:18,575 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T17:10:18,575 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554618575"}]},"ts":"1732554618575"} 2024-11-25T17:10:18,576 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-25T17:10:18,582 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T17:10:18,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1720 sec 2024-11-25T17:10:19,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-25T17:10:19,518 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-25T17:10:19,519 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x635b1751 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@593af048 2024-11-25T17:10:19,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cbd2497, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:19,538 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:19,539 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:19,545 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T17:10:19,546 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44336, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T17:10:19,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-25T17:10:19,549 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T17:10:19,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=160, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:19,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742409_1585 (size=999) 2024-11-25T17:10:19,981 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-25T17:10:19,981 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-25T17:10:19,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-25T17:10:19,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, REOPEN/MOVE}] 2024-11-25T17:10:20,001 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, REOPEN/MOVE 2024-11-25T17:10:20,005 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,010 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:10:20,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE; CloseRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:10:20,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:20,170 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(124): Close c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,170 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:10:20,170 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1681): Closing c2213e06f0c6c3750162aafa4b26c5ef, disabling compactions & flushes 2024-11-25T17:10:20,170 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:20,170 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:20,170 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. after waiting 0 ms 2024-11-25T17:10:20,170 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:20,206 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-25T17:10:20,208 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:20,208 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1635): Region close journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:20,208 WARN [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionServer(3786): Not adding moved region record: c2213e06f0c6c3750162aafa4b26c5ef to self. 2024-11-25T17:10:20,211 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=CLOSED 2024-11-25T17:10:20,211 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(170): Closed c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-11-25T17:10:20,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; CloseRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 in 202 msec 2024-11-25T17:10:20,214 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, REOPEN/MOVE; state=CLOSED, location=6579369734b6,41865,1732554474464; forceNewPlan=false, retain=true 2024-11-25T17:10:20,365 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=OPENING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=162, state=RUNNABLE; OpenRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:10:20,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:20,537 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:20,537 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(7285): Opening region: {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} 2024-11-25T17:10:20,538 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,538 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T17:10:20,540 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(7327): checking encryption for c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,540 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(7330): checking classloading for c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,542 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,543 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:10:20,543 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2213e06f0c6c3750162aafa4b26c5ef columnFamilyName A 2024-11-25T17:10:20,546 DEBUG [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:20,547 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(327): Store=c2213e06f0c6c3750162aafa4b26c5ef/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:10:20,550 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,554 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:10:20,554 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2213e06f0c6c3750162aafa4b26c5ef columnFamilyName B 2024-11-25T17:10:20,554 DEBUG [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:20,555 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(327): Store=c2213e06f0c6c3750162aafa4b26c5ef/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:10:20,555 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,556 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-25T17:10:20,556 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2213e06f0c6c3750162aafa4b26c5ef columnFamilyName C 2024-11-25T17:10:20,556 DEBUG [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:20,556 INFO [StoreOpener-c2213e06f0c6c3750162aafa4b26c5ef-1 {}] regionserver.HStore(327): Store=c2213e06f0c6c3750162aafa4b26c5ef/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T17:10:20,558 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:20,560 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,562 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,565 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T17:10:20,568 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1085): writing seq id for c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,569 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1102): Opened c2213e06f0c6c3750162aafa4b26c5ef; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74141134, jitterRate=0.10478898882865906}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T17:10:20,570 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegion(1001): Region open journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:20,573 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., pid=164, masterSystemTime=1732554620518 2024-11-25T17:10:20,578 DEBUG [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:20,578 INFO [RS_OPEN_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_OPEN_REGION, pid=164}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
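The modify request logged at 17:10:19,549 changes only family A, adding IS_MOB => 'true' and MOB_THRESHOLD => '4'; the master runs ModifyTableProcedure (pid=160) with a nested ReopenTableRegionsProcedure (pid=161) so the region is closed and reopened with the new descriptor, as traced above. A minimal sketch of the equivalent client call follows; it is not the test's own code, and the class name and Configuration setup are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  public static void main(String[] args) throws Exception {
    // Assumption: configuration already points at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(table);
      ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4': cells larger than 4 bytes go to MOB files
          .build();
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobA)
          .build();
      // Drives ModifyTableProcedure; regions are reopened to pick up the new descriptor.
      admin.modifyTable(modified);
    }
  }
}

admin.modifyTable returns once the procedure finishes, which in this run includes the CLOSE/OPEN cycle of c2213e06f0c6c3750162aafa4b26c5ef visible in the surrounding entries (openSeqNum moves from 2 to 5).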
2024-11-25T17:10:20,578 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=OPEN, openSeqNum=5, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162 2024-11-25T17:10:20,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; OpenRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 in 213 msec 2024-11-25T17:10:20,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-25T17:10:20,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, REOPEN/MOVE in 583 msec 2024-11-25T17:10:20,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-25T17:10:20,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 596 msec 2024-11-25T17:10:20,587 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 1.0360 sec 2024-11-25T17:10:20,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-11-25T17:10:20,590 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cbfd84f to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2209c520 2024-11-25T17:10:20,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5765d46a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,596 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fb684eb to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@537a66f8 2024-11-25T17:10:20,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac53e79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,644 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0644b7e6 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6094c70 2024-11-25T17:10:20,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc9c3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x5c9b5141 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@103dfc6e 2024-11-25T17:10:20,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,699 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11a52cdf to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e047c09 2024-11-25T17:10:20,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11030ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,707 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-11-25T17:10:20,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,721 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-11-25T17:10:20,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,747 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7fe93b to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7846cb78 2024-11-25T17:10:20,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e08ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,782 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11c440f7 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-11-25T17:10:20,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,809 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:56265 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-11-25T17:10:20,837 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T17:10:20,877 DEBUG [hconnection-0x914723-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,879 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45194, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,889 DEBUG [hconnection-0x2976400b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,891 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,897 DEBUG [hconnection-0x767a8b76-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,899 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45204, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:20,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:10:20,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:20,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:20,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:20,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:20,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:20,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:20,903 DEBUG [hconnection-0x7cc4e1d4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,905 DEBUG [hconnection-0x92f6816-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,905 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45208, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,907 INFO [RS-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,909 DEBUG [hconnection-0x2402e316-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,912 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,919 DEBUG [hconnection-0x28a76a9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,922 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,925 DEBUG [hconnection-0x133606a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,926 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,936 DEBUG [hconnection-0x3734b559-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,938 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,938 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:20,939 DEBUG [hconnection-0x4fc35d58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T17:10:20,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-25T17:10:20,940 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45274, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T17:10:20,941 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:20,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-25T17:10:20,942 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:20,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:20,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:20,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554680942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:20,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554680946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:20,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554680947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:20,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554680948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:20,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554680949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:20,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125da2dc5f935ea49398e054b31ac00f211_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554620898/Put/seqid=0 2024-11-25T17:10:21,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742410_1586 (size=12154) 2024-11-25T17:10:21,011 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:21,018 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125da2dc5f935ea49398e054b31ac00f211_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125da2dc5f935ea49398e054b31ac00f211_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:21,019 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/88c6522200574c3ab000210c50a32a31, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:21,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/88c6522200574c3ab000210c50a32a31 is 175, key is test_row_0/A:col10/1732554620898/Put/seqid=0 2024-11-25T17:10:21,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742411_1587 (size=30955) 2024-11-25T17:10:21,040 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/88c6522200574c3ab000210c50a32a31 2024-11-25T17:10:21,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-25T17:10:21,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554681051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554681054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554681055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554681057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554681059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/f3a91a9458fa49a687dbf99a961414b0 is 50, key is test_row_0/B:col10/1732554620898/Put/seqid=0 2024-11-25T17:10:21,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742412_1588 (size=12001) 2024-11-25T17:10:21,077 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/f3a91a9458fa49a687dbf99a961414b0 2024-11-25T17:10:21,093 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:21,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:21,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:21,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/49064be13a0d456b8ec8a9faf2852ed4 is 50, key is test_row_0/C:col10/1732554620898/Put/seqid=0 2024-11-25T17:10:21,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742413_1589 (size=12001) 2024-11-25T17:10:21,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/49064be13a0d456b8ec8a9faf2852ed4 2024-11-25T17:10:21,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/88c6522200574c3ab000210c50a32a31 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/88c6522200574c3ab000210c50a32a31 2024-11-25T17:10:21,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/88c6522200574c3ab000210c50a32a31, entries=150, sequenceid=15, filesize=30.2 K 2024-11-25T17:10:21,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/f3a91a9458fa49a687dbf99a961414b0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/f3a91a9458fa49a687dbf99a961414b0 2024-11-25T17:10:21,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/f3a91a9458fa49a687dbf99a961414b0, entries=150, sequenceid=15, filesize=11.7 K 2024-11-25T17:10:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/49064be13a0d456b8ec8a9faf2852ed4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/49064be13a0d456b8ec8a9faf2852ed4 2024-11-25T17:10:21,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/49064be13a0d456b8ec8a9faf2852ed4, entries=150, sequenceid=15, filesize=11.7 K 2024-11-25T17:10:21,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c2213e06f0c6c3750162aafa4b26c5ef in 308ms, sequenceid=15, compaction requested=false 2024-11-25T17:10:21,209 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-25T17:10:21,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:21,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-25T17:10:21,253 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:21,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:21,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:10:21,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:21,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:21,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:21,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:21,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:21,260 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:21,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:21,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:21,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
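Around this point the log alternates between put requests rejected with RegionTooBusyException ("Over memstore limit=512.0 K") and the FlushRegionCallable for pid=166 reporting that the region cannot be flushed again because it is already flushing. A minimal client-side sketch of handling that rejection with a bounded exponential backoff, assuming the stock HBase 2.x client API; the table, row, family, qualifier, and retry limits are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              // May be rejected while the region's memstore is over its blocking
              // limit, as in the WARN records above.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e; // give up after a few attempts
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2; // simple exponential backoff while the flush catches up
            }
          }
        }
      }
    }

The stock client normally retries this exception on its own; the explicit loop only makes the backoff visible for illustration.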
2024-11-25T17:10:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411258b98738dbffe4f65aaa77855989b41f7_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554620936/Put/seqid=0 2024-11-25T17:10:21,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554681278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554681279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554681281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554681283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554681290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742414_1590 (size=12154) 2024-11-25T17:10:21,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554681390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554681390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554681390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554681393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554681394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,415 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:21,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:21,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:21,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:21,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-25T17:10:21,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:21,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:21,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:21,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:21,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554681593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554681593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554681594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554681602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554681601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,710 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:21,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:21,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:21,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:21,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:21,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,739 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411258b98738dbffe4f65aaa77855989b41f7_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258b98738dbffe4f65aaa77855989b41f7_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:21,750 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/760ccaa8803c44d8bf1766c789d5176b, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:21,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/760ccaa8803c44d8bf1766c789d5176b is 175, key is test_row_0/A:col10/1732554620936/Put/seqid=0 2024-11-25T17:10:21,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742415_1591 (size=30955) 2024-11-25T17:10:21,893 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:21,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:21,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 
{event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:21,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:21,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:21,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:21,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554681895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554681897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554681902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554681906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:21,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:21,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554681911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,047 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:22,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:22,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:22,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:22,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:22,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:22,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:22,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-25T17:10:22,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:22,200 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:22,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:22,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:22,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:22,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:22,207 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/760ccaa8803c44d8bf1766c789d5176b 2024-11-25T17:10:22,209 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T17:10:22,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1137cd2eb60a44e3ac663cd5b5781bbe is 50, key is test_row_0/B:col10/1732554620936/Put/seqid=0 2024-11-25T17:10:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742416_1592 (size=12001) 2024-11-25T17:10:22,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1137cd2eb60a44e3ac663cd5b5781bbe 2024-11-25T17:10:22,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/1168dde13ef84c4483afd8412e73540f is 50, key is test_row_0/C:col10/1732554620936/Put/seqid=0 2024-11-25T17:10:22,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742417_1593 
(size=12001) 2024-11-25T17:10:22,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/1168dde13ef84c4483afd8412e73540f 2024-11-25T17:10:22,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/760ccaa8803c44d8bf1766c789d5176b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/760ccaa8803c44d8bf1766c789d5176b 2024-11-25T17:10:22,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/760ccaa8803c44d8bf1766c789d5176b, entries=150, sequenceid=41, filesize=30.2 K 2024-11-25T17:10:22,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1137cd2eb60a44e3ac663cd5b5781bbe as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1137cd2eb60a44e3ac663cd5b5781bbe 2024-11-25T17:10:22,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1137cd2eb60a44e3ac663cd5b5781bbe, entries=150, sequenceid=41, filesize=11.7 K 2024-11-25T17:10:22,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/1168dde13ef84c4483afd8412e73540f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1168dde13ef84c4483afd8412e73540f 2024-11-25T17:10:22,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1168dde13ef84c4483afd8412e73540f, entries=150, sequenceid=41, filesize=11.7 K 2024-11-25T17:10:22,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for c2213e06f0c6c3750162aafa4b26c5ef in 1061ms, sequenceid=41, compaction requested=false 2024-11-25T17:10:22,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:22,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:22,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-25T17:10:22,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:22,355 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-25T17:10:22,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:22,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:22,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:22,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:22,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:22,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:22,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ca4b0cba992044bab0690060be8278a5_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554621279/Put/seqid=0 2024-11-25T17:10:22,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742418_1594 (size=12154) 2024-11-25T17:10:22,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:22,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
as already flushing 2024-11-25T17:10:22,419 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ca4b0cba992044bab0690060be8278a5_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ca4b0cba992044bab0690060be8278a5_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:22,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/899710e7c02f4f5b84698ea575ecfbcc, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:22,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/899710e7c02f4f5b84698ea575ecfbcc is 175, key is test_row_0/A:col10/1732554621279/Put/seqid=0 2024-11-25T17:10:22,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742419_1595 (size=30955) 2024-11-25T17:10:22,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554682506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554682510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554682510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554682511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554682511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554682615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554682615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554682616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554682616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554682616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,625 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T17:10:22,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554682819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554682819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554682820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554682820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:22,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554682828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:22,844 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/899710e7c02f4f5b84698ea575ecfbcc 2024-11-25T17:10:22,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/c8f915e3df82409097f70090978286f8 is 50, key is test_row_0/B:col10/1732554621279/Put/seqid=0 2024-11-25T17:10:22,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742420_1596 (size=12001) 2024-11-25T17:10:22,895 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/c8f915e3df82409097f70090978286f8 2024-11-25T17:10:22,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/de598d7bd63144a4bea1af21ec630598 is 50, key is 
test_row_0/C:col10/1732554621279/Put/seqid=0 2024-11-25T17:10:22,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742421_1597 (size=12001) 2024-11-25T17:10:22,932 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/de598d7bd63144a4bea1af21ec630598 2024-11-25T17:10:22,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/899710e7c02f4f5b84698ea575ecfbcc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/899710e7c02f4f5b84698ea575ecfbcc 2024-11-25T17:10:23,000 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/899710e7c02f4f5b84698ea575ecfbcc, entries=150, sequenceid=52, filesize=30.2 K 2024-11-25T17:10:23,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/c8f915e3df82409097f70090978286f8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/c8f915e3df82409097f70090978286f8 2024-11-25T17:10:23,038 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/c8f915e3df82409097f70090978286f8, entries=150, sequenceid=52, filesize=11.7 K 2024-11-25T17:10:23,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/de598d7bd63144a4bea1af21ec630598 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de598d7bd63144a4bea1af21ec630598 2024-11-25T17:10:23,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-25T17:10:23,077 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de598d7bd63144a4bea1af21ec630598, entries=150, sequenceid=52, filesize=11.7 K 2024-11-25T17:10:23,081 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c2213e06f0c6c3750162aafa4b26c5ef in 726ms, sequenceid=52, compaction requested=true 2024-11-25T17:10:23,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:23,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:23,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-25T17:10:23,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-25T17:10:23,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-25T17:10:23,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1540 sec 2024-11-25T17:10:23,125 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.1740 sec 2024-11-25T17:10:23,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-25T17:10:23,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:23,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:23,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:23,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:23,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:23,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:23,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:23,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554683136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554683137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411256f83b24c4b3a4c78a40d1d4ab183e3de_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554622502/Put/seqid=0 2024-11-25T17:10:23,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554683140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554683141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554683141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742422_1598 (size=14594) 2024-11-25T17:10:23,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554683246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554683246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554683252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554683252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554683252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554683453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554683457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554683458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554683458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554683461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,556 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:23,564 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411256f83b24c4b3a4c78a40d1d4ab183e3de_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411256f83b24c4b3a4c78a40d1d4ab183e3de_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:23,565 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6fc048c2aa9f448a99d738337149affe, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:23,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6fc048c2aa9f448a99d738337149affe is 175, key is test_row_0/A:col10/1732554622502/Put/seqid=0 2024-11-25T17:10:23,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742423_1599 (size=39549) 2024-11-25T17:10:23,588 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6fc048c2aa9f448a99d738337149affe 2024-11-25T17:10:23,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/400043c1e38a4d049e206c087a1c8ee8 is 50, key is test_row_0/B:col10/1732554622502/Put/seqid=0 2024-11-25T17:10:23,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742424_1600 
(size=12001) 2024-11-25T17:10:23,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/400043c1e38a4d049e206c087a1c8ee8 2024-11-25T17:10:23,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/67e6450c24b54b768071bb2c41e956e7 is 50, key is test_row_0/C:col10/1732554622502/Put/seqid=0 2024-11-25T17:10:23,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742425_1601 (size=12001) 2024-11-25T17:10:23,729 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/67e6450c24b54b768071bb2c41e956e7 2024-11-25T17:10:23,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6fc048c2aa9f448a99d738337149affe as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6fc048c2aa9f448a99d738337149affe 2024-11-25T17:10:23,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6fc048c2aa9f448a99d738337149affe, entries=200, sequenceid=78, filesize=38.6 K 2024-11-25T17:10:23,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/400043c1e38a4d049e206c087a1c8ee8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/400043c1e38a4d049e206c087a1c8ee8 2024-11-25T17:10:23,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/400043c1e38a4d049e206c087a1c8ee8, entries=150, sequenceid=78, filesize=11.7 K 2024-11-25T17:10:23,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/67e6450c24b54b768071bb2c41e956e7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/67e6450c24b54b768071bb2c41e956e7 2024-11-25T17:10:23,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554683761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554683762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554683762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554683762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:23,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554683777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:23,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/67e6450c24b54b768071bb2c41e956e7, entries=150, sequenceid=78, filesize=11.7 K 2024-11-25T17:10:23,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for c2213e06f0c6c3750162aafa4b26c5ef in 666ms, sequenceid=78, compaction requested=true 2024-11-25T17:10:23,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:23,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:23,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:23,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:23,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:10:23,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:23,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-25T17:10:23,793 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:23,793 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:23,794 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of 
size 132414 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:23,794 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:23,794 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:23,794 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/88c6522200574c3ab000210c50a32a31, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/760ccaa8803c44d8bf1766c789d5176b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/899710e7c02f4f5b84698ea575ecfbcc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6fc048c2aa9f448a99d738337149affe] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=129.3 K 2024-11-25T17:10:23,794 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:23,794 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/88c6522200574c3ab000210c50a32a31, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/760ccaa8803c44d8bf1766c789d5176b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/899710e7c02f4f5b84698ea575ecfbcc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6fc048c2aa9f448a99d738337149affe] 2024-11-25T17:10:23,795 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 88c6522200574c3ab000210c50a32a31, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732554620890 2024-11-25T17:10:23,795 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 760ccaa8803c44d8bf1766c789d5176b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732554620936 2024-11-25T17:10:23,796 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 899710e7c02f4f5b84698ea575ecfbcc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732554621264 2024-11-25T17:10:23,796 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fc048c2aa9f448a99d738337149affe, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554622502 2024-11-25T17:10:23,800 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:23,800 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:23,800 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
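The ExploringCompactionPolicy entries above show a minor compaction being assembled from 4 eligible store files for c2213e06f0c6c3750162aafa4b26c5ef/A. The sketch below is a minimal illustration of the standard store-compaction settings that drive this kind of selection; the keys are the usual HBase configuration names, but the values are illustrative and not necessarily the ones this test run used.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: standard compaction-selection knobs, with illustrative values.
public class CompactionSelectionSketch {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 4);
        // Upper bound on how many files a single minor compaction may include.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Size ratio ExploringCompactionPolicy uses when deciding whether a file is
        // close enough in size to its neighbours to join the selection.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        return conf;
    }
}
```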
2024-11-25T17:10:23,800 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/49064be13a0d456b8ec8a9faf2852ed4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1168dde13ef84c4483afd8412e73540f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de598d7bd63144a4bea1af21ec630598, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/67e6450c24b54b768071bb2c41e956e7] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=46.9 K 2024-11-25T17:10:23,805 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49064be13a0d456b8ec8a9faf2852ed4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732554620890 2024-11-25T17:10:23,808 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1168dde13ef84c4483afd8412e73540f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732554620936 2024-11-25T17:10:23,808 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting de598d7bd63144a4bea1af21ec630598, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732554621264 2024-11-25T17:10:23,811 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67e6450c24b54b768071bb2c41e956e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554622502 2024-11-25T17:10:23,825 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:23,841 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411253d853a058cde4f2e8f33624cff85fa33_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:23,844 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411253d853a058cde4f2e8f33624cff85fa33_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:23,844 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411253d853a058cde4f2e8f33624cff85fa33_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 
2024-11-25T17:10:23,848 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#518 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:23,848 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/f243ce3cd54949d9b82b092a2867a260 is 50, key is test_row_0/C:col10/1732554622502/Put/seqid=0 2024-11-25T17:10:23,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742426_1602 (size=4469) 2024-11-25T17:10:23,901 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#517 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:23,902 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/21ef1dc9c7ff4d34a2dc0f30d6345e38 is 175, key is test_row_0/A:col10/1732554622502/Put/seqid=0 2024-11-25T17:10:23,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742427_1603 (size=12139) 2024-11-25T17:10:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742428_1604 (size=31093) 2024-11-25T17:10:23,939 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/f243ce3cd54949d9b82b092a2867a260 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f243ce3cd54949d9b82b092a2867a260 2024-11-25T17:10:23,955 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into f243ce3cd54949d9b82b092a2867a260(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:23,956 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:23,956 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=12, startTime=1732554623793; duration=0sec 2024-11-25T17:10:23,956 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:23,956 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:23,956 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:23,960 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:23,960 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:23,960 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:23,960 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/f3a91a9458fa49a687dbf99a961414b0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1137cd2eb60a44e3ac663cd5b5781bbe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/c8f915e3df82409097f70090978286f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/400043c1e38a4d049e206c087a1c8ee8] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=46.9 K 2024-11-25T17:10:23,963 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3a91a9458fa49a687dbf99a961414b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732554620890 2024-11-25T17:10:23,963 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1137cd2eb60a44e3ac663cd5b5781bbe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732554620936 2024-11-25T17:10:23,964 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8f915e3df82409097f70090978286f8, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732554621264 2024-11-25T17:10:23,971 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 400043c1e38a4d049e206c087a1c8ee8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554622502 2024-11-25T17:10:24,012 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#519 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:24,013 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/a0f103b3dc3943b09d3ffb150165acde is 50, key is test_row_0/B:col10/1732554622502/Put/seqid=0 2024-11-25T17:10:24,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742429_1605 (size=12139) 2024-11-25T17:10:24,093 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/a0f103b3dc3943b09d3ffb150165acde as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a0f103b3dc3943b09d3ffb150165acde 2024-11-25T17:10:24,113 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into a0f103b3dc3943b09d3ffb150165acde(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
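The selection reported above ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking", with ExploringCompactionPolicy picking all 4 files) is governed by the standard store-file compaction knobs. A short sketch of those settings; the values shown are the usual defaults, not values read from this test's site configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minimum files before a minor compaction is considered
        conf.setInt("hbase.hstore.compaction.max", 10);       // maximum files compacted in one pass
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used when exploring permutations
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure reported above
      }
    }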
2024-11-25T17:10:24,114 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:24,114 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=12, startTime=1732554623792; duration=0sec 2024-11-25T17:10:24,114 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:24,114 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:24,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:10:24,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:24,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:24,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:24,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125a42028c1d9db4a599fca2222ce9b0aa3_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554624275/Put/seqid=0 2024-11-25T17:10:24,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742430_1606 (size=14594) 2024-11-25T17:10:24,300 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:24,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554684301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554684301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,306 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125a42028c1d9db4a599fca2222ce9b0aa3_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a42028c1d9db4a599fca2222ce9b0aa3_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:24,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554684302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,306 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/e9b710cee89e485f85a33f11c37059b2, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:24,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/e9b710cee89e485f85a33f11c37059b2 is 175, key is test_row_0/A:col10/1732554624275/Put/seqid=0 2024-11-25T17:10:24,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554684305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554684305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742431_1607 (size=39549) 2024-11-25T17:10:24,343 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/21ef1dc9c7ff4d34a2dc0f30d6345e38 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/21ef1dc9c7ff4d34a2dc0f30d6345e38 2024-11-25T17:10:24,347 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into 21ef1dc9c7ff4d34a2dc0f30d6345e38(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:24,347 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:24,347 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=12, startTime=1732554623792; duration=0sec 2024-11-25T17:10:24,347 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:24,347 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:24,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554684407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554684407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554684407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554684411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554684416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554684611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554684612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554684613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554684615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554684625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,713 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/e9b710cee89e485f85a33f11c37059b2 2024-11-25T17:10:24,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/67b6c69edf5f4dab909213e70de29f84 is 50, key is test_row_0/B:col10/1732554624275/Put/seqid=0 2024-11-25T17:10:24,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742432_1608 (size=12001) 2024-11-25T17:10:24,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554684915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554684917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554684918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554684919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:24,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:24,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554684928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-25T17:10:25,055 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-25T17:10:25,064 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:25,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-25T17:10:25,066 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:25,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-25T17:10:25,067 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:25,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=167 2024-11-25T17:10:25,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/67b6c69edf5f4dab909213e70de29f84 2024-11-25T17:10:25,219 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:25,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-25T17:10:25,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:25,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:25,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:25,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:25,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:25,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
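The repeated RegionTooBusyException ("Over memstore limit=512.0 K") in the preceding records is the region blocking writers once its memstore exceeds the flush size multiplied by the block multiplier (the 512 K figure presumably reflects a reduced flush size in the test configuration), while the flush procedure itself reports "Unable to complete flush" because a flush is already running. Clients normally just retry; below is a minimal sketch of the relevant server-side knobs plus a hand-rolled retry loop. The row, family, and qualifier come from the log; the retry policy is illustrative, and the stock client also retries internally via hbase.client.retries.number and hbase.client.pause:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;
    import java.io.IOException;

    public class BusyRegionRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit = flush size * block multiplier (defaults: 128 MB * 4).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (IOException e) {           // includes RegionTooBusyException
              if (attempt >= 5) throw e;        // give up after a few attempts
              Thread.sleep(100L * attempt);     // simple linear backoff
            }
          }
        }
      }
    }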
2024-11-25T17:10:25,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/f16ff0241e5c4850952d0b15791ae080 is 50, key is test_row_0/C:col10/1732554624275/Put/seqid=0 2024-11-25T17:10:25,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742433_1609 (size=12001) 2024-11-25T17:10:25,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/f16ff0241e5c4850952d0b15791ae080 2024-11-25T17:10:25,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/e9b710cee89e485f85a33f11c37059b2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/e9b710cee89e485f85a33f11c37059b2 2024-11-25T17:10:25,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/e9b710cee89e485f85a33f11c37059b2, entries=200, sequenceid=95, filesize=38.6 K 2024-11-25T17:10:25,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/67b6c69edf5f4dab909213e70de29f84 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/67b6c69edf5f4dab909213e70de29f84 2024-11-25T17:10:25,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/67b6c69edf5f4dab909213e70de29f84, entries=150, sequenceid=95, filesize=11.7 K 2024-11-25T17:10:25,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/f16ff0241e5c4850952d0b15791ae080 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f16ff0241e5c4850952d0b15791ae080 2024-11-25T17:10:25,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f16ff0241e5c4850952d0b15791ae080, entries=150, sequenceid=95, filesize=11.7 K 2024-11-25T17:10:25,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for c2213e06f0c6c3750162aafa4b26c5ef in 1023ms, sequenceid=95, 
compaction requested=false 2024-11-25T17:10:25,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:25,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-25T17:10:25,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:25,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-25T17:10:25,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:25,374 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:10:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:25,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125273e16518f204794b4e638fe0a11130a_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554624301/Put/seqid=0 2024-11-25T17:10:25,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742434_1610 (size=12154) 2024-11-25T17:10:25,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:25,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
as already flushing 2024-11-25T17:10:25,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554685438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554685440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554685441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554685449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554685449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554685552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554685552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554685552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554685554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554685555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-25T17:10:25,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554685759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554685758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554685759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554685763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:25,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554685763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:25,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:25,802 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125273e16518f204794b4e638fe0a11130a_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125273e16518f204794b4e638fe0a11130a_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:25,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/390dfcd3f4274275a8e8c497e57c7f49, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:25,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/390dfcd3f4274275a8e8c497e57c7f49 is 175, key is test_row_0/A:col10/1732554624301/Put/seqid=0 2024-11-25T17:10:25,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742435_1611 (size=30955) 2024-11-25T17:10:25,850 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/390dfcd3f4274275a8e8c497e57c7f49 2024-11-25T17:10:25,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3640a04b1f664cbd9105775848eae9e6 is 50, key is test_row_0/B:col10/1732554624301/Put/seqid=0 2024-11-25T17:10:25,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742436_1612 (size=12001) 2024-11-25T17:10:26,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554686063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554686064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554686064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554686067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554686069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-25T17:10:26,284 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3640a04b1f664cbd9105775848eae9e6 2024-11-25T17:10:26,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/fe04dd247e3846f4bb017de4d3e240f8 is 50, key is test_row_0/C:col10/1732554624301/Put/seqid=0 2024-11-25T17:10:26,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742437_1613 (size=12001) 2024-11-25T17:10:26,300 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/fe04dd247e3846f4bb017de4d3e240f8 2024-11-25T17:10:26,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/390dfcd3f4274275a8e8c497e57c7f49 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/390dfcd3f4274275a8e8c497e57c7f49 2024-11-25T17:10:26,307 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/390dfcd3f4274275a8e8c497e57c7f49, entries=150, sequenceid=118, filesize=30.2 K 2024-11-25T17:10:26,308 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3640a04b1f664cbd9105775848eae9e6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3640a04b1f664cbd9105775848eae9e6 2024-11-25T17:10:26,320 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3640a04b1f664cbd9105775848eae9e6, entries=150, sequenceid=118, filesize=11.7 K 2024-11-25T17:10:26,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/fe04dd247e3846f4bb017de4d3e240f8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe04dd247e3846f4bb017de4d3e240f8 2024-11-25T17:10:26,326 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe04dd247e3846f4bb017de4d3e240f8, entries=150, sequenceid=118, filesize=11.7 K 2024-11-25T17:10:26,327 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=87.22 KB/89310 for c2213e06f0c6c3750162aafa4b26c5ef in 953ms, sequenceid=118, compaction requested=true 2024-11-25T17:10:26,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:26,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:26,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-25T17:10:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-25T17:10:26,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-25T17:10:26,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2660 sec 2024-11-25T17:10:26,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.2840 sec 2024-11-25T17:10:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:26,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-25T17:10:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:26,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ebc0d81856524bb5939e7b2e309281fd_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554626568/Put/seqid=0 2024-11-25T17:10:26,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554686589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554686593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554686594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554686594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554686599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742438_1614 (size=14794) 2024-11-25T17:10:26,614 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:26,619 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125ebc0d81856524bb5939e7b2e309281fd_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ebc0d81856524bb5939e7b2e309281fd_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:26,620 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cbab9ee9e0644ff2b34074926a361e35, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:26,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cbab9ee9e0644ff2b34074926a361e35 is 175, key is test_row_0/A:col10/1732554626568/Put/seqid=0 2024-11-25T17:10:26,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is 
added to blk_1073742439_1615 (size=39749) 2024-11-25T17:10:26,642 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cbab9ee9e0644ff2b34074926a361e35 2024-11-25T17:10:26,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/63933b197e424950b6e667879e3ccbd7 is 50, key is test_row_0/B:col10/1732554626568/Put/seqid=0 2024-11-25T17:10:26,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554686696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742440_1616 (size=12151) 2024-11-25T17:10:26,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554686701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554686701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/63933b197e424950b6e667879e3ccbd7 2024-11-25T17:10:26,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554686705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554686707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/d55260ad24314b62a001f88a82aff9f2 is 50, key is test_row_0/C:col10/1732554626568/Put/seqid=0 2024-11-25T17:10:26,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742441_1617 (size=12151) 2024-11-25T17:10:26,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/d55260ad24314b62a001f88a82aff9f2 2024-11-25T17:10:26,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cbab9ee9e0644ff2b34074926a361e35 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cbab9ee9e0644ff2b34074926a361e35 2024-11-25T17:10:26,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cbab9ee9e0644ff2b34074926a361e35, entries=200, sequenceid=135, filesize=38.8 K 2024-11-25T17:10:26,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/63933b197e424950b6e667879e3ccbd7 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/63933b197e424950b6e667879e3ccbd7 2024-11-25T17:10:26,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/63933b197e424950b6e667879e3ccbd7, entries=150, sequenceid=135, filesize=11.9 K 2024-11-25T17:10:26,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/d55260ad24314b62a001f88a82aff9f2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d55260ad24314b62a001f88a82aff9f2 2024-11-25T17:10:26,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d55260ad24314b62a001f88a82aff9f2, entries=150, sequenceid=135, filesize=11.9 K 2024-11-25T17:10:26,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for c2213e06f0c6c3750162aafa4b26c5ef in 238ms, sequenceid=135, compaction requested=true 2024-11-25T17:10:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:26,807 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:26,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:26,807 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:26,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:26,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:26,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:26,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:26,808 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:26,808 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:26,808 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:26,809 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a0f103b3dc3943b09d3ffb150165acde, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/67b6c69edf5f4dab909213e70de29f84, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3640a04b1f664cbd9105775848eae9e6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/63933b197e424950b6e667879e3ccbd7] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=47.2 K 2024-11-25T17:10:26,809 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141346 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:26,809 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:26,809 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:26,809 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/21ef1dc9c7ff4d34a2dc0f30d6345e38, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/e9b710cee89e485f85a33f11c37059b2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/390dfcd3f4274275a8e8c497e57c7f49, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cbab9ee9e0644ff2b34074926a361e35] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=138.0 K 2024-11-25T17:10:26,809 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:26,809 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/21ef1dc9c7ff4d34a2dc0f30d6345e38, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/e9b710cee89e485f85a33f11c37059b2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/390dfcd3f4274275a8e8c497e57c7f49, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cbab9ee9e0644ff2b34074926a361e35] 2024-11-25T17:10:26,810 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a0f103b3dc3943b09d3ffb150165acde, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554622502 2024-11-25T17:10:26,810 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21ef1dc9c7ff4d34a2dc0f30d6345e38, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554622502 2024-11-25T17:10:26,810 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 67b6c69edf5f4dab909213e70de29f84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732554623139 2024-11-25T17:10:26,810 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9b710cee89e485f85a33f11c37059b2, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732554623139 2024-11-25T17:10:26,811 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 390dfcd3f4274275a8e8c497e57c7f49, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732554624291 2024-11-25T17:10:26,811 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3640a04b1f664cbd9105775848eae9e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732554624291 2024-11-25T17:10:26,811 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbab9ee9e0644ff2b34074926a361e35, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732554625438 2024-11-25T17:10:26,812 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 63933b197e424950b6e667879e3ccbd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732554625438 2024-11-25T17:10:26,827 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:26,828 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/935c3da8140941ffa88f503c27e397d9 is 50, key is test_row_0/B:col10/1732554626568/Put/seqid=0 2024-11-25T17:10:26,829 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:26,832 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125496b4c6872a446338cadfaf8c0107ea3_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:26,834 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125496b4c6872a446338cadfaf8c0107ea3_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:26,834 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125496b4c6872a446338cadfaf8c0107ea3_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:26,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742442_1618 (size=12425) 2024-11-25T17:10:26,870 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/935c3da8140941ffa88f503c27e397d9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/935c3da8140941ffa88f503c27e397d9 2024-11-25T17:10:26,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742443_1619 (size=4469) 2024-11-25T17:10:26,901 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 935c3da8140941ffa88f503c27e397d9(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:26,901 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:26,901 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=12, startTime=1732554626807; duration=0sec 2024-11-25T17:10:26,902 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:26,902 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:26,902 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:26,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:26,903 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-25T17:10:26,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:26,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:26,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:26,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:26,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:26,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:26,917 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:26,917 DEBUG 
[RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:26,917 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:26,917 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f243ce3cd54949d9b82b092a2867a260, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f16ff0241e5c4850952d0b15791ae080, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe04dd247e3846f4bb017de4d3e240f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d55260ad24314b62a001f88a82aff9f2] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=47.2 K 2024-11-25T17:10:26,918 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f243ce3cd54949d9b82b092a2867a260, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732554622502 2024-11-25T17:10:26,918 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f16ff0241e5c4850952d0b15791ae080, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1732554623139 2024-11-25T17:10:26,918 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting fe04dd247e3846f4bb017de4d3e240f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732554624291 2024-11-25T17:10:26,919 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d55260ad24314b62a001f88a82aff9f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732554625438 2024-11-25T17:10:26,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411258de6b788b1af4d938cd0198f63ecaf5d_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554626901/Put/seqid=0 2024-11-25T17:10:26,935 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#532 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:26,936 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b40902f6b4e544f9b1d00051215861d6 is 50, key is test_row_0/C:col10/1732554626568/Put/seqid=0 2024-11-25T17:10:26,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742444_1620 (size=14794) 2024-11-25T17:10:26,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742445_1621 (size=12425) 2024-11-25T17:10:26,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554686939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554686940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554686940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554686946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:26,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:26,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554686950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554687048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554687048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554687048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554687056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554687056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-25T17:10:27,179 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-25T17:10:27,184 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:27,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-25T17:10:27,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-25T17:10:27,188 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:27,188 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:27,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:27,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554687250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554687251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554687251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554687280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,283 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#530 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:27,284 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/fed3e7e7231c4b88adf948bff3044584 is 175, key is test_row_0/A:col10/1732554626568/Put/seqid=0 2024-11-25T17:10:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-25T17:10:27,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554687289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742446_1622 (size=31379) 2024-11-25T17:10:27,313 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/fed3e7e7231c4b88adf948bff3044584 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fed3e7e7231c4b88adf948bff3044584 2024-11-25T17:10:27,319 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into fed3e7e7231c4b88adf948bff3044584(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
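The repeated RegionTooBusyException entries in this stretch come from HRegion.checkResources refusing writes once the region's memstore passes its blocking limit, 512.0 K in this run, until the in-flight flush and compactions drain it. In stock HBase that limit is the memstore flush size multiplied by the block multiplier; the short Java sketch below only illustrates that arithmetic, and the configuration values in it are assumptions rather than values taken from this test's setup.

    // Illustrative only: the memstore blocking threshold behind the
    // "Over memstore limit" RegionTooBusyException seen in this log.
    // The formula (flush size * block multiplier) is stock behaviour; the
    // defaults below are assumptions, not this test's configuration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        System.out.println("writes block above " + (flushSize * multiplier) + " bytes per region");
        // A 512 K limit, as logged here, would follow from e.g. a 128 K flush size
        // with the default multiplier of 4.
      }
    }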
2024-11-25T17:10:27,319 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:27,319 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=12, startTime=1732554626807; duration=0sec 2024-11-25T17:10:27,319 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:27,319 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:27,340 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:27,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-25T17:10:27,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:27,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
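The pid=170 FlushRegionCallable above fails with "Unable to complete flush" only because the MemStoreFlusher is already flushing the same region; the region server reports the failure back and the master re-dispatches the callable several more times below until the region is free to flush. From a client's point of view this whole exchange sits behind one admin flush call, which the earlier HBaseAdmin$TableFuture lines show waiting on the procedure. A minimal sketch of that call, with connection setup assumed, is:

    // Minimal sketch of the client call behind FlushTableProcedure pid=169/170.
    // Connection setup is assumed; the table name is the one in the log.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Waits on the flush procedure; while the region is still flushing on its own,
          // the master keeps retrying the per-region callable, as the log shows.
          admin.flush(TableName.valueOf("default", "TestAcidGuarantees"));
        }
      }
    }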
2024-11-25T17:10:27,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,344 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:27,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,355 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411258de6b788b1af4d938cd0198f63ecaf5d_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258de6b788b1af4d938cd0198f63ecaf5d_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:27,356 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6d8653ccc3a740d78e883b69eec2a4e9, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:27,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6d8653ccc3a740d78e883b69eec2a4e9 is 175, key is test_row_0/A:col10/1732554626901/Put/seqid=0 2024-11-25T17:10:27,359 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b40902f6b4e544f9b1d00051215861d6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b40902f6b4e544f9b1d00051215861d6 2024-11-25T17:10:27,363 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into b40902f6b4e544f9b1d00051215861d6(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
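At this point the region server has finished both background compactions it queued: the shortCompactions thread rewrote 4 store files of family A into fed3e7e7231c4b88adf948bff3044584, and the longCompactions thread has just done the same for family C. Both were selected and queued by the region server itself. For reference only, the equivalent work can be requested by hand through the Admin API; the sketch below is an illustration, not something this test performs.

    // Hedged sketch: explicitly requesting compaction of the stores compacted
    // automatically in this log (families A and C). The server still chooses
    // which files to include, as the shortCompactions/longCompactions threads did.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table, Bytes.toBytes("A"));
          admin.compact(table, Bytes.toBytes("C"));
        }
      }
    }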
2024-11-25T17:10:27,363 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:27,363 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=12, startTime=1732554626808; duration=0sec 2024-11-25T17:10:27,364 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:27,364 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:27,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742447_1623 (size=39749) 2024-11-25T17:10:27,367 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6d8653ccc3a740d78e883b69eec2a4e9 2024-11-25T17:10:27,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/6511dba193d34220ad9d6f1b27bf952e is 50, key is test_row_0/B:col10/1732554626901/Put/seqid=0 2024-11-25T17:10:27,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742448_1624 (size=12151) 2024-11-25T17:10:27,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/6511dba193d34220ad9d6f1b27bf952e 2024-11-25T17:10:27,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/32939c68654d42678da5876191077ae0 is 50, key is test_row_0/C:col10/1732554626901/Put/seqid=0 2024-11-25T17:10:27,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742449_1625 (size=12151) 2024-11-25T17:10:27,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-25T17:10:27,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:27,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-25T17:10:27,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:27,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
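Interleaved with these flush retries, the RPC handler threads keep rejecting client Mutate requests of about 4.7 K each with the same RegionTooBusyException, and those rejections continue below until the flush completes. A hedged sketch of the kind of writer producing that load follows; the row test_row_0, qualifier col10 and families A, B and C come from the log, while the value size and the retry policy are assumptions.

    // Hedged sketch of a writer like the one behind the "Mutate size: 4.7 K" calls:
    // wide puts to families A/B/C of test_row_0, retried while the region is blocked.
    // Value size and backoff are illustrative, not what TestAcidGuarantees uses.
    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        byte[] value = new byte[4096]; // roughly the size of the mutations in the log
        Put put = new Put(Bytes.toBytes("test_row_0"));
        for (String family : new String[] {"A", "B", "C"}) {
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          while (true) {
            try {
              table.put(put);
              break;
            } catch (IOException e) {
              // RegionTooBusyException normally surfaces here once client retries
              // are exhausted; back off briefly and try again.
              Thread.sleep(200);
            }
          }
        }
      }
    }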
2024-11-25T17:10:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554687556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554687557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554687565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554687583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:27,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554687600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:27,651 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:27,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-25T17:10:27,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:27,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-25T17:10:27,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:27,810 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-25T17:10:27,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:27,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:27,810 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:27,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/32939c68654d42678da5876191077ae0 2024-11-25T17:10:27,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6d8653ccc3a740d78e883b69eec2a4e9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6d8653ccc3a740d78e883b69eec2a4e9 2024-11-25T17:10:27,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6d8653ccc3a740d78e883b69eec2a4e9, entries=200, sequenceid=157, filesize=38.8 K 2024-11-25T17:10:27,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/6511dba193d34220ad9d6f1b27bf952e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6511dba193d34220ad9d6f1b27bf952e 2024-11-25T17:10:27,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6511dba193d34220ad9d6f1b27bf952e, entries=150, 
sequenceid=157, filesize=11.9 K 2024-11-25T17:10:27,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/32939c68654d42678da5876191077ae0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/32939c68654d42678da5876191077ae0 2024-11-25T17:10:27,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/32939c68654d42678da5876191077ae0, entries=150, sequenceid=157, filesize=11.9 K 2024-11-25T17:10:27,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for c2213e06f0c6c3750162aafa4b26c5ef in 1013ms, sequenceid=157, compaction requested=false 2024-11-25T17:10:27,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:27,965 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:27,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-25T17:10:27,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
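The flush that just completed wrote family A through HMobStore, with the cell data first staged under mobdir and renamed into the MOB area before the store file was committed, while B and C went through the plain DefaultStoreFlusher. The CompactingMemStore messages below show each store swapping its in-memory pipeline as the next flush starts, so in-memory compaction is active. A table with that shape could be declared roughly as in the sketch below; only the table and family names come from the log, and the MOB threshold and compaction policy are assumptions.

    // Hedged sketch of a table definition consistent with this log: family A
    // MOB-enabled (HMobStore above), every family using in-memory compaction
    // (CompactingMemStore below). Threshold and policy values are assumptions.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTestTable {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder td =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String name : new String[] {"A", "B", "C"}) {
          ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(name))
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
          if (name.equals("A")) {
            cf.setMobEnabled(true).setMobThreshold(100L); // store larger A values as MOBs
          }
          td.setColumnFamily(cf.build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(td.build());
        }
      }
    }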
2024-11-25T17:10:27,973 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:10:27,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:27,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:27,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:27,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:27,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:27,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:27,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125199e09def6c341ae91da8679e9f842b1_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554626922/Put/seqid=0 2024-11-25T17:10:28,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742450_1626 (size=12304) 2024-11-25T17:10:28,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:28,040 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125199e09def6c341ae91da8679e9f842b1_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125199e09def6c341ae91da8679e9f842b1_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:28,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/ee560ecb3b244b2d8db60683b9393a62, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:28,042 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/ee560ecb3b244b2d8db60683b9393a62 is 175, key is test_row_0/A:col10/1732554626922/Put/seqid=0 2024-11-25T17:10:28,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742451_1627 (size=31105) 2024-11-25T17:10:28,055 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/ee560ecb3b244b2d8db60683b9393a62 2024-11-25T17:10:28,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/0c1a67d2919d443890182b5093d8c68f is 50, key is test_row_0/B:col10/1732554626922/Put/seqid=0 2024-11-25T17:10:28,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:28,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:28,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742452_1628 (size=12151) 2024-11-25T17:10:28,100 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/0c1a67d2919d443890182b5093d8c68f 2024-11-25T17:10:28,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554688117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554688117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554688119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554688125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554688128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/9d46fff8504d43878f07e64975063372 is 50, key is test_row_0/C:col10/1732554626922/Put/seqid=0 2024-11-25T17:10:28,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742453_1629 (size=12151) 2024-11-25T17:10:28,173 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/9d46fff8504d43878f07e64975063372 2024-11-25T17:10:28,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/ee560ecb3b244b2d8db60683b9393a62 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/ee560ecb3b244b2d8db60683b9393a62 2024-11-25T17:10:28,195 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/ee560ecb3b244b2d8db60683b9393a62, entries=150, sequenceid=175, filesize=30.4 K 2024-11-25T17:10:28,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/0c1a67d2919d443890182b5093d8c68f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/0c1a67d2919d443890182b5093d8c68f 2024-11-25T17:10:28,200 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/0c1a67d2919d443890182b5093d8c68f, entries=150, sequenceid=175, filesize=11.9 K 2024-11-25T17:10:28,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/9d46fff8504d43878f07e64975063372 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/9d46fff8504d43878f07e64975063372 2024-11-25T17:10:28,206 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/9d46fff8504d43878f07e64975063372, entries=150, sequenceid=175, filesize=11.9 K 2024-11-25T17:10:28,207 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for c2213e06f0c6c3750162aafa4b26c5ef in 234ms, sequenceid=175, compaction requested=true 2024-11-25T17:10:28,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:28,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:28,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-25T17:10:28,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-25T17:10:28,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-25T17:10:28,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0190 sec 2024-11-25T17:10:28,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.0280 sec 2024-11-25T17:10:28,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:28,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:10:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:28,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:28,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:28,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:28,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:28,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:28,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125798bacfe3ba140e085b4838598ecb00f_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554628226/Put/seqid=0 2024-11-25T17:10:28,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742454_1630 (size=14794) 2024-11-25T17:10:28,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554688241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554688243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554688243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554688243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554688244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-25T17:10:28,292 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-25T17:10:28,293 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:28,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-25T17:10:28,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-25T17:10:28,294 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:28,294 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:28,295 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:28,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554688346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554688346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554688347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554688347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554688353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-25T17:10:28,446 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:28,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:28,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:28,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554688549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554688551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554688551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554688552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554688560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-25T17:10:28,602 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:28,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:28,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:28,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:28,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,653 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:28,701 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125798bacfe3ba140e085b4838598ecb00f_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125798bacfe3ba140e085b4838598ecb00f_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:28,706 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/de6b7b29b7a342f8aee5a04e0068d1fc, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:28,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/de6b7b29b7a342f8aee5a04e0068d1fc is 175, key is test_row_0/A:col10/1732554628226/Put/seqid=0 2024-11-25T17:10:28,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742455_1631 (size=39749) 2024-11-25T17:10:28,729 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/de6b7b29b7a342f8aee5a04e0068d1fc 2024-11-25T17:10:28,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3 is 50, key is test_row_0/B:col10/1732554628226/Put/seqid=0 2024-11-25T17:10:28,756 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:28,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:28,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
as already flushing 2024-11-25T17:10:28,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742456_1632 (size=12151) 2024-11-25T17:10:28,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554688855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554688855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554688858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554688865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:28,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554688866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-25T17:10:28,910 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:28,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:28,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:28,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:28,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:28,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,067 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:29,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:29,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:29,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3 2024-11-25T17:10:29,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/dddb377888e34243ad1c638456a1baed is 50, key is test_row_0/C:col10/1732554628226/Put/seqid=0 2024-11-25T17:10:29,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742457_1633 (size=12151) 2024-11-25T17:10:29,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:29,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
as already flushing 2024-11-25T17:10:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:29,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554689359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:29,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:29,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554689359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:29,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:29,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:29,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:29,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:29,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:29,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554689376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:29,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:29,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554689376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:29,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:29,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554689378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:29,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-25T17:10:29,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:29,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:29,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:29,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:29,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/dddb377888e34243ad1c638456a1baed 2024-11-25T17:10:29,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/de6b7b29b7a342f8aee5a04e0068d1fc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/de6b7b29b7a342f8aee5a04e0068d1fc 2024-11-25T17:10:29,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/de6b7b29b7a342f8aee5a04e0068d1fc, entries=200, sequenceid=197, filesize=38.8 K 2024-11-25T17:10:29,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3 2024-11-25T17:10:29,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3, entries=150, 
sequenceid=197, filesize=11.9 K 2024-11-25T17:10:29,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/dddb377888e34243ad1c638456a1baed as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/dddb377888e34243ad1c638456a1baed 2024-11-25T17:10:29,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/dddb377888e34243ad1c638456a1baed, entries=150, sequenceid=197, filesize=11.9 K 2024-11-25T17:10:29,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=87.22 KB/89310 for c2213e06f0c6c3750162aafa4b26c5ef in 1420ms, sequenceid=197, compaction requested=true 2024-11-25T17:10:29,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:29,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:29,648 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:29,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:29,648 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:29,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:29,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:29,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:29,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:29,649 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48878 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:29,649 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:29,649 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in 
TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,649 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/935c3da8140941ffa88f503c27e397d9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6511dba193d34220ad9d6f1b27bf952e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/0c1a67d2919d443890182b5093d8c68f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=47.7 K 2024-11-25T17:10:29,649 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141982 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:29,650 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 935c3da8140941ffa88f503c27e397d9, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732554625438 2024-11-25T17:10:29,650 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:29,650 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:29,650 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fed3e7e7231c4b88adf948bff3044584, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6d8653ccc3a740d78e883b69eec2a4e9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/ee560ecb3b244b2d8db60683b9393a62, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/de6b7b29b7a342f8aee5a04e0068d1fc] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=138.7 K 2024-11-25T17:10:29,650 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
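Note: the ExploringCompactionPolicy entries above record four store files of similar size being selected for minor compaction of families A and B ("4 files of size 48878 ... with 3 in ratio"). As a minimal illustrative sketch only, not the actual HBase source, the "in ratio" test can be thought of as: every file in the candidate set must be no larger than the configured ratio (hbase.hstore.compaction.ratio) times the combined size of the other files in the set. The class and method names below are hypothetical.

    import java.util.List;

    public class CompactionRatioSketch {
        // Returns true if every file is at most `ratio` times the combined
        // size of the other files in the candidate selection.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                long others = total - size;
                if (size > others * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the B-store selection above: four files of ~12 K each.
            List<Long> sizes = List.of(12_390L, 12_163L, 12_163L, 12_162L);
            System.out.println(filesInRatio(sizes, 1.2)); // similarly sized files pass
        }
    }

With four nearly equal files, no single file dominates the rest, so the whole set qualifies and is compacted together, which is what the "4 (all) file(s)" completion messages later in the log confirm.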
2024-11-25T17:10:29,650 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6511dba193d34220ad9d6f1b27bf952e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732554626586 2024-11-25T17:10:29,650 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fed3e7e7231c4b88adf948bff3044584, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6d8653ccc3a740d78e883b69eec2a4e9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/ee560ecb3b244b2d8db60683b9393a62, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/de6b7b29b7a342f8aee5a04e0068d1fc] 2024-11-25T17:10:29,650 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c1a67d2919d443890182b5093d8c68f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732554626922 2024-11-25T17:10:29,650 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting fed3e7e7231c4b88adf948bff3044584, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732554625438 2024-11-25T17:10:29,650 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d11a8fe6001c46fa9ab3f8fb3f7e81e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732554628117 2024-11-25T17:10:29,651 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d8653ccc3a740d78e883b69eec2a4e9, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732554626586 2024-11-25T17:10:29,651 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee560ecb3b244b2d8db60683b9393a62, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732554626922 2024-11-25T17:10:29,651 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting de6b7b29b7a342f8aee5a04e0068d1fc, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732554628117 2024-11-25T17:10:29,658 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#541 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:29,659 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1376ad1d2db34770a7f9f66ce6790081 is 50, key is test_row_0/B:col10/1732554628226/Put/seqid=0 2024-11-25T17:10:29,663 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:29,667 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125bea48e7ad6cd45cca56ad09b63c87d80_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:29,670 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125bea48e7ad6cd45cca56ad09b63c87d80_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:29,670 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125bea48e7ad6cd45cca56ad09b63c87d80_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:29,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742459_1635 (size=4469) 2024-11-25T17:10:29,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742458_1634 (size=12561) 2024-11-25T17:10:29,689 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:29,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-25T17:10:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
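Note: the DefaultMobStoreCompactor entries above (MOB writer created, then aborted "because there are no MOB cells") appear because column family A of this test table is MOB-enabled; only cells above the family's MOB threshold go to the mobdir/ files seen in these paths. A hedged sketch of how such a family is declared with the HBase 2.x client API follows; the 100 KB threshold is an illustrative value, not taken from the test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
        public static void main(String[] args) {
            // Declare family "A" as MOB-enabled: cells larger than the
            // threshold are written to separate MOB files under mobdir/.
            ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100 * 1024L)   // example threshold, 100 KB
                .build();

            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .build();

            System.out.println(table);
        }
    }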
2024-11-25T17:10:29,689 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:10:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:29,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:29,692 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1376ad1d2db34770a7f9f66ce6790081 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1376ad1d2db34770a7f9f66ce6790081 2024-11-25T17:10:29,698 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 1376ad1d2db34770a7f9f66ce6790081(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
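Note: the CompactingMemStore "FLUSHING TO DISK" and "Swapping pipeline suffix" entries above indicate the memstores are running with in-memory compaction (segments sit in an in-memory pipeline until they are flushed). A minimal sketch, assuming the HBase 2.x descriptor API, of enabling that per family; BASIC is used purely as an example policy, and the same behaviour can reportedly also be set cluster-wide via hbase.hregion.compacting.memstore.type.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
        public static void main(String[] args) {
            // Enable the compacting memstore for family "B"; its segments are
            // then kept in an in-memory pipeline and flushed to disk as logged
            // above. The policy value is illustrative.
            ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("B"))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build();
            System.out.println(family);
        }
    }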
2024-11-25T17:10:29,698 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:29,698 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=12, startTime=1732554629648; duration=0sec 2024-11-25T17:10:29,698 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:29,698 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:29,698 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:29,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125047328a8003a436484490029adb7c8e4_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554628242/Put/seqid=0 2024-11-25T17:10:29,700 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48878 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:29,700 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:29,700 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
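Note: the PressureAwareThroughputController messages in this stretch report an aggregate compaction limit of 50.00 MB/second. A hedged sketch of the configuration keys that, to the best of my understanding, bound compaction throughput; the values shown are examples and are not necessarily the ones this test uses.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Upper and lower bounds (bytes/sec) between which the
            // pressure-aware controller adjusts compaction throughput.
            // Example values only.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
            System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
        }
    }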
2024-11-25T17:10:29,700 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b40902f6b4e544f9b1d00051215861d6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/32939c68654d42678da5876191077ae0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/9d46fff8504d43878f07e64975063372, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/dddb377888e34243ad1c638456a1baed] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=47.7 K 2024-11-25T17:10:29,701 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b40902f6b4e544f9b1d00051215861d6, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1732554625438 2024-11-25T17:10:29,701 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 32939c68654d42678da5876191077ae0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732554626586 2024-11-25T17:10:29,703 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d46fff8504d43878f07e64975063372, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732554626922 2024-11-25T17:10:29,704 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting dddb377888e34243ad1c638456a1baed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732554628117 2024-11-25T17:10:29,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742460_1636 (size=12304) 2024-11-25T17:10:29,711 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#544 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:29,711 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/db2278dd928e4945a0bada9ae33bdb7c is 50, key is test_row_0/C:col10/1732554628226/Put/seqid=0 2024-11-25T17:10:29,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742461_1637 (size=12561) 2024-11-25T17:10:29,742 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/db2278dd928e4945a0bada9ae33bdb7c as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/db2278dd928e4945a0bada9ae33bdb7c 2024-11-25T17:10:29,745 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into db2278dd928e4945a0bada9ae33bdb7c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:29,745 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:29,745 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=12, startTime=1732554629648; duration=0sec 2024-11-25T17:10:29,745 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:29,745 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:30,084 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#542 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:30,085 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/69578324487048128b6df6621b233868 is 175, key is test_row_0/A:col10/1732554628226/Put/seqid=0 2024-11-25T17:10:30,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742462_1638 (size=31515) 2024-11-25T17:10:30,100 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/69578324487048128b6df6621b233868 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/69578324487048128b6df6621b233868 2024-11-25T17:10:30,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:30,105 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into 69578324487048128b6df6621b233868(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:30,105 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:30,105 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=12, startTime=1732554629648; duration=0sec 2024-11-25T17:10:30,105 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:30,105 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:30,107 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125047328a8003a436484490029adb7c8e4_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125047328a8003a436484490029adb7c8e4_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:30,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7e05393a6565481db29992797f4b5d6f, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:30,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7e05393a6565481db29992797f4b5d6f is 175, key is test_row_0/A:col10/1732554628242/Put/seqid=0 2024-11-25T17:10:30,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742463_1639 (size=31105) 2024-11-25T17:10:30,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-25T17:10:30,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554690413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554690421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554690421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554690425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554690429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,519 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7e05393a6565481db29992797f4b5d6f 2024-11-25T17:10:30,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554690523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/38af93d56bb941a0a3235705f14b1244 is 50, key is test_row_0/B:col10/1732554628242/Put/seqid=0 2024-11-25T17:10:30,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554690526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554690529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554690534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554690534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742464_1640 (size=12151) 2024-11-25T17:10:30,569 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/38af93d56bb941a0a3235705f14b1244 2024-11-25T17:10:30,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/055ce72fb4eb403a9317df8d79fc49a1 is 50, key is test_row_0/C:col10/1732554628242/Put/seqid=0 2024-11-25T17:10:30,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742465_1641 (size=12151) 2024-11-25T17:10:30,588 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/055ce72fb4eb403a9317df8d79fc49a1 2024-11-25T17:10:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7e05393a6565481db29992797f4b5d6f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7e05393a6565481db29992797f4b5d6f 2024-11-25T17:10:30,604 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7e05393a6565481db29992797f4b5d6f, entries=150, sequenceid=213, filesize=30.4 K 2024-11-25T17:10:30,605 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/38af93d56bb941a0a3235705f14b1244 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/38af93d56bb941a0a3235705f14b1244 2024-11-25T17:10:30,610 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/38af93d56bb941a0a3235705f14b1244, entries=150, sequenceid=213, filesize=11.9 K 2024-11-25T17:10:30,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/055ce72fb4eb403a9317df8d79fc49a1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055ce72fb4eb403a9317df8d79fc49a1 2024-11-25T17:10:30,614 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055ce72fb4eb403a9317df8d79fc49a1, entries=150, sequenceid=213, filesize=11.9 K 2024-11-25T17:10:30,614 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for c2213e06f0c6c3750162aafa4b26c5ef in 925ms, sequenceid=213, compaction requested=false 2024-11-25T17:10:30,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:30,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:30,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-25T17:10:30,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-25T17:10:30,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-25T17:10:30,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3220 sec 2024-11-25T17:10:30,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 2.3250 sec 2024-11-25T17:10:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:30,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:10:30,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:30,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:30,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:30,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:30,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:30,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:30,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125e3aab389a0a54c879d2bd0d03085e9be_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554630409/Put/seqid=0 2024-11-25T17:10:30,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742466_1642 (size=14794) 2024-11-25T17:10:30,742 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:30,747 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125e3aab389a0a54c879d2bd0d03085e9be_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e3aab389a0a54c879d2bd0d03085e9be_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:30,747 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,747 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4e0c7fbe40e94cb29620d3db52d680e1, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:30,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554690744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4e0c7fbe40e94cb29620d3db52d680e1 is 175, key is test_row_0/A:col10/1732554630409/Put/seqid=0 2024-11-25T17:10:30,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554690745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554690746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554690746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554690747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742467_1643 (size=39749) 2024-11-25T17:10:30,752 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4e0c7fbe40e94cb29620d3db52d680e1 2024-11-25T17:10:30,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/13de382ece3b42bcbaaf4830f18fce60 is 50, key is test_row_0/B:col10/1732554630409/Put/seqid=0 2024-11-25T17:10:30,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742468_1644 (size=12151) 2024-11-25T17:10:30,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554690849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554690854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554690854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554690854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:30,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:30,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554690854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554691056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554691056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554691058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554691059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554691059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/13de382ece3b42bcbaaf4830f18fce60 2024-11-25T17:10:31,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b0be6a84090348c78152c616cc862f92 is 50, key is test_row_0/C:col10/1732554630409/Put/seqid=0 2024-11-25T17:10:31,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742469_1645 (size=12151) 2024-11-25T17:10:31,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b0be6a84090348c78152c616cc862f92 2024-11-25T17:10:31,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4e0c7fbe40e94cb29620d3db52d680e1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4e0c7fbe40e94cb29620d3db52d680e1 2024-11-25T17:10:31,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4e0c7fbe40e94cb29620d3db52d680e1, entries=200, sequenceid=238, filesize=38.8 K 2024-11-25T17:10:31,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/13de382ece3b42bcbaaf4830f18fce60 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/13de382ece3b42bcbaaf4830f18fce60 2024-11-25T17:10:31,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/13de382ece3b42bcbaaf4830f18fce60, entries=150, sequenceid=238, filesize=11.9 K 2024-11-25T17:10:31,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b0be6a84090348c78152c616cc862f92 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b0be6a84090348c78152c616cc862f92 2024-11-25T17:10:31,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b0be6a84090348c78152c616cc862f92, entries=150, sequenceid=238, filesize=11.9 K 2024-11-25T17:10:31,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for c2213e06f0c6c3750162aafa4b26c5ef in 600ms, sequenceid=238, compaction requested=true 2024-11-25T17:10:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 
2024-11-25T17:10:31,330 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:31,331 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:31,331 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:31,331 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:31,331 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/69578324487048128b6df6621b233868, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7e05393a6565481db29992797f4b5d6f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4e0c7fbe40e94cb29620d3db52d680e1] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=100.0 K 2024-11-25T17:10:31,331 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:31,331 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/69578324487048128b6df6621b233868, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7e05393a6565481db29992797f4b5d6f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4e0c7fbe40e94cb29620d3db52d680e1] 2024-11-25T17:10:31,332 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 69578324487048128b6df6621b233868, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732554628117 2024-11-25T17:10:31,332 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e05393a6565481db29992797f4b5d6f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732554628232 2024-11-25T17:10:31,332 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e0c7fbe40e94cb29620d3db52d680e1, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732554630394 2024-11-25T17:10:31,333 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:31,337 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:31,337 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:31,338 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:31,338 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1376ad1d2db34770a7f9f66ce6790081, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/38af93d56bb941a0a3235705f14b1244, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/13de382ece3b42bcbaaf4830f18fce60] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.0 K 2024-11-25T17:10:31,338 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1376ad1d2db34770a7f9f66ce6790081, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732554628117 2024-11-25T17:10:31,338 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38af93d56bb941a0a3235705f14b1244, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732554628232 2024-11-25T17:10:31,339 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13de382ece3b42bcbaaf4830f18fce60, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732554630409 2024-11-25T17:10:31,357 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:31,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-25T17:10:31,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:31,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:31,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:31,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:31,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:31,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:31,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:31,367 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#551 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:31,368 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/54b8c2fd1fc3464c917cd8ec356252ce is 50, key is test_row_0/B:col10/1732554630409/Put/seqid=0 2024-11-25T17:10:31,370 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411257250be15e88748c0b918dfd15730791e_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:31,372 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411257250be15e88748c0b918dfd15730791e_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:31,372 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411257250be15e88748c0b918dfd15730791e_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:31,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411252e89429ecad0438bbf4a546cf2dfb99e_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554631361/Put/seqid=0 2024-11-25T17:10:31,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742470_1646 (size=12663) 2024-11-25T17:10:31,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742471_1647 (size=4469) 2024-11-25T17:10:31,383 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#550 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:31,384 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/fd2fa8b58bdf4777ba853abe3ff269be is 175, key is test_row_0/A:col10/1732554630409/Put/seqid=0 2024-11-25T17:10:31,386 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/54b8c2fd1fc3464c917cd8ec356252ce as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/54b8c2fd1fc3464c917cd8ec356252ce 2024-11-25T17:10:31,391 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 54b8c2fd1fc3464c917cd8ec356252ce(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:31,391 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:31,391 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=13, startTime=1732554631330; duration=0sec 2024-11-25T17:10:31,391 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:31,391 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:31,391 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:31,392 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:31,392 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:31,392 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:31,392 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/db2278dd928e4945a0bada9ae33bdb7c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055ce72fb4eb403a9317df8d79fc49a1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b0be6a84090348c78152c616cc862f92] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.0 K 2024-11-25T17:10:31,393 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting db2278dd928e4945a0bada9ae33bdb7c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732554628117 2024-11-25T17:10:31,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554691389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554691390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554691392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,395 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 055ce72fb4eb403a9317df8d79fc49a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732554628232 2024-11-25T17:10:31,395 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0be6a84090348c78152c616cc862f92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732554630409 2024-11-25T17:10:31,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554691394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554691394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742472_1648 (size=14794) 2024-11-25T17:10:31,413 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:31,416 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411252e89429ecad0438bbf4a546cf2dfb99e_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411252e89429ecad0438bbf4a546cf2dfb99e_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:31,416 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a38f5626aa914424a2a547d1a7fd70f8, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:31,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a38f5626aa914424a2a547d1a7fd70f8 is 175, key is test_row_0/A:col10/1732554631361/Put/seqid=0 2024-11-25T17:10:31,419 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#553 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:31,420 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/a043eb7534ef416dab08878287c45229 is 50, key is test_row_0/C:col10/1732554630409/Put/seqid=0 2024-11-25T17:10:31,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742474_1650 (size=39749) 2024-11-25T17:10:31,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742473_1649 (size=31617) 2024-11-25T17:10:31,433 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/fd2fa8b58bdf4777ba853abe3ff269be as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fd2fa8b58bdf4777ba853abe3ff269be 2024-11-25T17:10:31,437 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into fd2fa8b58bdf4777ba853abe3ff269be(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:31,437 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:31,437 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=13, startTime=1732554631330; duration=0sec 2024-11-25T17:10:31,437 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:31,437 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:31,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742475_1651 (size=12663) 2024-11-25T17:10:31,453 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/a043eb7534ef416dab08878287c45229 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a043eb7534ef416dab08878287c45229 2024-11-25T17:10:31,459 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into a043eb7534ef416dab08878287c45229(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:31,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:31,459 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=13, startTime=1732554631330; duration=0sec 2024-11-25T17:10:31,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:31,459 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:31,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554691495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554691498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554691498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554691506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554691509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554691701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554691702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554691702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554691709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554691723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:31,821 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a38f5626aa914424a2a547d1a7fd70f8 2024-11-25T17:10:31,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3b6ac6520cce450d811965be0c2b748f is 50, key is test_row_0/B:col10/1732554631361/Put/seqid=0 2024-11-25T17:10:31,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742476_1652 (size=12151) 2024-11-25T17:10:31,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3b6ac6520cce450d811965be0c2b748f 2024-11-25T17:10:31,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/a92ef0df6b1342b3920e61857804f265 is 50, key is test_row_0/C:col10/1732554631361/Put/seqid=0 2024-11-25T17:10:31,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742477_1653 (size=12151) 2024-11-25T17:10:31,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/a92ef0df6b1342b3920e61857804f265 2024-11-25T17:10:31,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a38f5626aa914424a2a547d1a7fd70f8 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a38f5626aa914424a2a547d1a7fd70f8 2024-11-25T17:10:31,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a38f5626aa914424a2a547d1a7fd70f8, entries=200, sequenceid=253, filesize=38.8 K 2024-11-25T17:10:31,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3b6ac6520cce450d811965be0c2b748f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3b6ac6520cce450d811965be0c2b748f 2024-11-25T17:10:31,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3b6ac6520cce450d811965be0c2b748f, entries=150, sequenceid=253, filesize=11.9 K 2024-11-25T17:10:31,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/a92ef0df6b1342b3920e61857804f265 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a92ef0df6b1342b3920e61857804f265 2024-11-25T17:10:31,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a92ef0df6b1342b3920e61857804f265, entries=150, sequenceid=253, filesize=11.9 K 2024-11-25T17:10:31,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for c2213e06f0c6c3750162aafa4b26c5ef in 595ms, sequenceid=253, compaction requested=false 2024-11-25T17:10:31,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:32,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-25T17:10:32,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:32,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:32,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:32,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:32,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:32,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:32,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:32,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554692020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554692022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554692024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554692026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554692027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411251c94b731ac9f42bea1a862016291f690_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554632007/Put/seqid=0 2024-11-25T17:10:32,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742478_1654 (size=14994) 2024-11-25T17:10:32,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554692130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554692134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554692134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554692134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554692346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554692346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554692346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554692349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-25T17:10:32,410 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-25T17:10:32,420 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:32,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-25T17:10:32,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-25T17:10:32,421 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:32,422 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:32,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:32,498 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:32,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-25T17:10:32,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554692534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,545 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411251c94b731ac9f42bea1a862016291f690_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411251c94b731ac9f42bea1a862016291f690_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:32,553 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/35496487940d4ef6a2a4be9dc95420fe, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:32,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/35496487940d4ef6a2a4be9dc95420fe is 175, key is test_row_0/A:col10/1732554632007/Put/seqid=0 2024-11-25T17:10:32,573 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:32,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:32,581 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:32,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:32,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:32,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:32,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:32,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:32,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742479_1655 (size=39949) 2024-11-25T17:10:32,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554692658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554692658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554692658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:32,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554692666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:32,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-25T17:10:32,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:32,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:32,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:32,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:32,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:32,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:32,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:32,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:32,887 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:32,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:32,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:32,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:32,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:32,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:32,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:32,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:33,010 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=279, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/35496487940d4ef6a2a4be9dc95420fe 2024-11-25T17:10:33,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/da472b75c7dc4e9f9ad74cca24fadfe8 is 50, key is test_row_0/B:col10/1732554632007/Put/seqid=0 2024-11-25T17:10:33,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-25T17:10:33,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:33,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:33,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:33,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:33,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742480_1656 (size=12301) 2024-11-25T17:10:33,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/da472b75c7dc4e9f9ad74cca24fadfe8 2024-11-25T17:10:33,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/eeedc778609146a7b0440f26b7785aa2 is 50, key is test_row_0/C:col10/1732554632007/Put/seqid=0 2024-11-25T17:10:33,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742481_1657 (size=12301) 2024-11-25T17:10:33,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/eeedc778609146a7b0440f26b7785aa2 2024-11-25T17:10:33,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/35496487940d4ef6a2a4be9dc95420fe as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/35496487940d4ef6a2a4be9dc95420fe 2024-11-25T17:10:33,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/35496487940d4ef6a2a4be9dc95420fe, entries=200, sequenceid=279, filesize=39.0 K 2024-11-25T17:10:33,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/da472b75c7dc4e9f9ad74cca24fadfe8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/da472b75c7dc4e9f9ad74cca24fadfe8 2024-11-25T17:10:33,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/da472b75c7dc4e9f9ad74cca24fadfe8, entries=150, sequenceid=279, filesize=12.0 K 2024-11-25T17:10:33,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/eeedc778609146a7b0440f26b7785aa2 as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/eeedc778609146a7b0440f26b7785aa2 2024-11-25T17:10:33,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/eeedc778609146a7b0440f26b7785aa2, entries=150, sequenceid=279, filesize=12.0 K 2024-11-25T17:10:33,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c2213e06f0c6c3750162aafa4b26c5ef in 1131ms, sequenceid=279, compaction requested=true 2024-11-25T17:10:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:33,140 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:33,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:10:33,140 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:33,141 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111315 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:33,141 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:33,141 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:33,141 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:33,141 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:33,141 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,141 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fd2fa8b58bdf4777ba853abe3ff269be, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a38f5626aa914424a2a547d1a7fd70f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/35496487940d4ef6a2a4be9dc95420fe] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=108.7 K 2024-11-25T17:10:33,141 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,141 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/54b8c2fd1fc3464c917cd8ec356252ce, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3b6ac6520cce450d811965be0c2b748f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/da472b75c7dc4e9f9ad74cca24fadfe8] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.2 K 2024-11-25T17:10:33,141 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fd2fa8b58bdf4777ba853abe3ff269be, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a38f5626aa914424a2a547d1a7fd70f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/35496487940d4ef6a2a4be9dc95420fe] 2024-11-25T17:10:33,143 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd2fa8b58bdf4777ba853abe3ff269be, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732554630409 2024-11-25T17:10:33,143 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 54b8c2fd1fc3464c917cd8ec356252ce, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732554630409 2024-11-25T17:10:33,144 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a38f5626aa914424a2a547d1a7fd70f8, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732554630742 2024-11-25T17:10:33,145 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b6ac6520cce450d811965be0c2b748f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732554630742 2024-11-25T17:10:33,145 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35496487940d4ef6a2a4be9dc95420fe, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732554631386 2024-11-25T17:10:33,146 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting da472b75c7dc4e9f9ad74cca24fadfe8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732554631386 2024-11-25T17:10:33,155 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:33,155 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#559 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:33,156 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/467568364aa64c399e105de36d6f1993 is 50, key is test_row_0/B:col10/1732554632007/Put/seqid=0 2024-11-25T17:10:33,160 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112557e67627f1444a7c90af033038ec29d4_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:33,162 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112557e67627f1444a7c90af033038ec29d4_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:33,162 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112557e67627f1444a7c90af033038ec29d4_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:33,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:10:33,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:33,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:33,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:33,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:33,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:33,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:33,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:33,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125f5994023b51b42268f7b180e6de26642_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554632023/Put/seqid=0 2024-11-25T17:10:33,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742482_1658 (size=12915) 2024-11-25T17:10:33,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742484_1660 (size=14994) 2024-11-25T17:10:33,183 DEBUG 
[MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:33,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742483_1659 (size=4469) 2024-11-25T17:10:33,185 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#560 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:33,186 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/42e9bcb6ec5b4a37b0753c545164f519 is 175, key is test_row_0/A:col10/1732554632007/Put/seqid=0 2024-11-25T17:10:33,188 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125f5994023b51b42268f7b180e6de26642_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f5994023b51b42268f7b180e6de26642_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:33,188 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c05db277e4c0418fa597315c4397f791, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:33,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c05db277e4c0418fa597315c4397f791 is 175, key is test_row_0/A:col10/1732554632023/Put/seqid=0 2024-11-25T17:10:33,193 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:33,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:33,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:33,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
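The "NOT flushing ... as already flushing" line above and the "Unable to complete flush" errors that follow show the master's remote flush procedure (pid=174) landing on the region server while MemStoreFlusher.0 is already flushing the region; the master re-dispatches the same procedure (it shows up again at 17:10:33,347, 33,500 and 33,653) until the in-progress flush completes, so the repeated errors are expected retries rather than data loss. For context, a minimal sketch of how such an administrative flush is requested through the public HBase client API; the connection setup and bare try-with-resources here are illustrative and not taken from the TestAcidGuarantees code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Ask the servers to flush every region of the table. If a region
                // is already flushing, the server-side procedure is retried, which
                // is what the repeated "Unable to complete flush" entries reflect.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The same request can also be issued from the HBase shell with flush 'TestAcidGuarantees'.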
2024-11-25T17:10:33,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554693195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554693196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554693197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554693198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742485_1661 (size=31869) 2024-11-25T17:10:33,220 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/42e9bcb6ec5b4a37b0753c545164f519 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/42e9bcb6ec5b4a37b0753c545164f519 2024-11-25T17:10:33,228 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into 42e9bcb6ec5b4a37b0753c545164f519(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
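The SortedCompactionPolicy / ExploringCompactionPolicy lines just below pick all three C-family files (about 12.4 K + 11.9 K + 12.0 K, the 37115 bytes reported) because every candidate passes the size-ratio check. A simplified sketch of that check follows; the 1.2 value mirrors the stock default of hbase.hstore.compaction.ratio, and the method is an illustration of the idea, not HBase's actual ExploringCompactionPolicy code:

    public class RatioSelectionSketch {
        // A candidate file stays in the minor-compaction set when it is no larger
        // than `ratio` times the combined size of the other candidates (simplified;
        // the real policy compares a file against the files that sort after it).
        static boolean withinRatio(long[] sizes, double ratio) {
            long total = 0;
            for (long s : sizes) {
                total += s;
            }
            for (long s : sizes) {
                if (s > ratio * (total - s)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // ~12.4 K, ~11.9 K and ~12.0 K, totalling 37115 bytes as in the log
            // (the split between the three files is approximated).
            long[] sizes = {12_698, 12_186, 12_231};
            System.out.println(withinRatio(sizes, 1.2)); // true: compact all three
        }
    }

With three similarly sized files a single permutation suffices, which is why the selection message below reports "after considering 1 permutations with 1 in ratio".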
2024-11-25T17:10:33,228 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:33,228 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=13, startTime=1732554633140; duration=0sec 2024-11-25T17:10:33,228 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:33,228 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:33,228 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:33,230 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:33,230 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:33,230 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,230 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a043eb7534ef416dab08878287c45229, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a92ef0df6b1342b3920e61857804f265, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/eeedc778609146a7b0440f26b7785aa2] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.2 K 2024-11-25T17:10:33,235 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a043eb7534ef416dab08878287c45229, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732554630409 2024-11-25T17:10:33,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742486_1662 (size=39949) 2024-11-25T17:10:33,235 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a92ef0df6b1342b3920e61857804f265, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732554630742 2024-11-25T17:10:33,236 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting eeedc778609146a7b0440f26b7785aa2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=279, earliestPutTs=1732554631386 2024-11-25T17:10:33,244 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#562 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:33,244 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/45def14e4d034a6f8042d57519a23bc6 is 50, key is test_row_0/C:col10/1732554632007/Put/seqid=0 2024-11-25T17:10:33,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742487_1663 (size=12915) 2024-11-25T17:10:33,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554693299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554693300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554693301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554693302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,346 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:33,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:33,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:33,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,500 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:33,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:33,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:33,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554693504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554693505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554693505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554693506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-25T17:10:33,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554693551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,584 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/467568364aa64c399e105de36d6f1993 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/467568364aa64c399e105de36d6f1993 2024-11-25T17:10:33,590 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 467568364aa64c399e105de36d6f1993(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
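The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking size, which is the configured flush size multiplied by the block multiplier; the client treats the exception as retriable (note the deadlines roughly 60 s after each call), so the writers back off and retry rather than fail outright. A sketch of the two settings involved; the 128 K flush size and multiplier of 4 below are an illustrative assumption that happens to yield the 512 K limit seen in the log, not values read from the test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingSketch {
        public static void main(String[] args) {
            // Blocking limit = flush size * block multiplier. Writes get
            // RegionTooBusyException until a flush brings the memstore back
            // under this limit. Values below are assumptions for illustration.
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
            System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288
        }
    }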
2024-11-25T17:10:33,590 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:33,590 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=13, startTime=1732554633140; duration=0sec 2024-11-25T17:10:33,590 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:33,590 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:33,636 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c05db277e4c0418fa597315c4397f791 2024-11-25T17:10:33,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/ef0ac4b22aca4ee2a94f4315f2a801a2 is 50, key is test_row_0/B:col10/1732554632023/Put/seqid=0 2024-11-25T17:10:33,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:33,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:33,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:33,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,653 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:33,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742488_1664 (size=12301) 2024-11-25T17:10:33,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/ef0ac4b22aca4ee2a94f4315f2a801a2 2024-11-25T17:10:33,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b04ff3e5c20242609c5a34d71555b0d4 is 50, key is test_row_0/C:col10/1732554632023/Put/seqid=0 2024-11-25T17:10:33,689 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/45def14e4d034a6f8042d57519a23bc6 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/45def14e4d034a6f8042d57519a23bc6 2024-11-25T17:10:33,694 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into 45def14e4d034a6f8042d57519a23bc6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:33,694 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:33,694 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=13, startTime=1732554633140; duration=0sec 2024-11-25T17:10:33,694 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:33,695 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:33,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742489_1665 (size=12301) 2024-11-25T17:10:33,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b04ff3e5c20242609c5a34d71555b0d4 2024-11-25T17:10:33,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c05db277e4c0418fa597315c4397f791 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c05db277e4c0418fa597315c4397f791 2024-11-25T17:10:33,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c05db277e4c0418fa597315c4397f791, entries=200, sequenceid=293, filesize=39.0 K 2024-11-25T17:10:33,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/ef0ac4b22aca4ee2a94f4315f2a801a2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ef0ac4b22aca4ee2a94f4315f2a801a2 2024-11-25T17:10:33,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ef0ac4b22aca4ee2a94f4315f2a801a2, entries=150, sequenceid=293, filesize=12.0 K 2024-11-25T17:10:33,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b04ff3e5c20242609c5a34d71555b0d4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b04ff3e5c20242609c5a34d71555b0d4 2024-11-25T17:10:33,729 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b04ff3e5c20242609c5a34d71555b0d4, entries=150, sequenceid=293, filesize=12.0 K 2024-11-25T17:10:33,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c2213e06f0c6c3750162aafa4b26c5ef in 565ms, sequenceid=293, compaction requested=false 2024-11-25T17:10:33,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:33,806 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:33,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-25T17:10:33,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:33,806 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:10:33,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:33,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:33,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:33,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:33,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:33,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:33,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:33,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
as already flushing 2024-11-25T17:10:33,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b817011f47e244d98ad7cd13ae5c1f95_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554633196/Put/seqid=0 2024-11-25T17:10:33,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554693823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554693825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554693830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554693830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742490_1666 (size=12454) 2024-11-25T17:10:33,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:33,863 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b817011f47e244d98ad7cd13ae5c1f95_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b817011f47e244d98ad7cd13ae5c1f95_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:33,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f5158eb2fe9a450bba3214d0c594d222, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:33,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f5158eb2fe9a450bba3214d0c594d222 is 175, key is test_row_0/A:col10/1732554633196/Put/seqid=0 2024-11-25T17:10:33,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742491_1667 (size=31255) 2024-11-25T17:10:33,907 INFO 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f5158eb2fe9a450bba3214d0c594d222 2024-11-25T17:10:33,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/ae69b247400c49b7bee317db20d62dec is 50, key is test_row_0/B:col10/1732554633196/Put/seqid=0 2024-11-25T17:10:33,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554693929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554693931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554693935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:33,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554693935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:33,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742492_1668 (size=12301) 2024-11-25T17:10:33,961 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/ae69b247400c49b7bee317db20d62dec 2024-11-25T17:10:33,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b3a13625e09c4926a63f5a66bf5e18fd is 50, key is test_row_0/C:col10/1732554633196/Put/seqid=0 2024-11-25T17:10:34,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742493_1669 (size=12301) 2024-11-25T17:10:34,022 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b3a13625e09c4926a63f5a66bf5e18fd 2024-11-25T17:10:34,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f5158eb2fe9a450bba3214d0c594d222 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f5158eb2fe9a450bba3214d0c594d222 2024-11-25T17:10:34,029 
INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f5158eb2fe9a450bba3214d0c594d222, entries=150, sequenceid=318, filesize=30.5 K 2024-11-25T17:10:34,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/ae69b247400c49b7bee317db20d62dec as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ae69b247400c49b7bee317db20d62dec 2024-11-25T17:10:34,033 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ae69b247400c49b7bee317db20d62dec, entries=150, sequenceid=318, filesize=12.0 K 2024-11-25T17:10:34,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/b3a13625e09c4926a63f5a66bf5e18fd as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b3a13625e09c4926a63f5a66bf5e18fd 2024-11-25T17:10:34,037 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b3a13625e09c4926a63f5a66bf5e18fd, entries=150, sequenceid=318, filesize=12.0 K 2024-11-25T17:10:34,038 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for c2213e06f0c6c3750162aafa4b26c5ef in 232ms, sequenceid=318, compaction requested=true 2024-11-25T17:10:34,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:34,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:34,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-25T17:10:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-25T17:10:34,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-25T17:10:34,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6170 sec 2024-11-25T17:10:34,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.6210 sec 2024-11-25T17:10:34,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:10:34,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:34,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:34,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:34,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:34,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:34,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:34,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:34,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411254ef1432f3d284769b28ce1a0eb49f538_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554634136/Put/seqid=0 2024-11-25T17:10:34,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742494_1670 (size=14994) 2024-11-25T17:10:34,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554694163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554694163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554694164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554694164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554694270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554694270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554694270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554694270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554694473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554694473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554694474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554694474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-25T17:10:34,535 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-25T17:10:34,536 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-25T17:10:34,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-25T17:10:34,538 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:34,539 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:34,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:34,568 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:34,572 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411254ef1432f3d284769b28ce1a0eb49f538_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411254ef1432f3d284769b28ce1a0eb49f538_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:34,573 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3628d08c58ac47658cdb3e1d7cfa318e, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:34,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3628d08c58ac47658cdb3e1d7cfa318e is 175, key is test_row_0/A:col10/1732554634136/Put/seqid=0 2024-11-25T17:10:34,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742495_1671 (size=39949) 2024-11-25T17:10:34,593 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3628d08c58ac47658cdb3e1d7cfa318e 2024-11-25T17:10:34,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/43a92d559cb34602a2e33ce71c0f0a25 is 50, key is test_row_0/B:col10/1732554634136/Put/seqid=0 2024-11-25T17:10:34,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742496_1672 (size=12301) 2024-11-25T17:10:34,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-25T17:10:34,692 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:34,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:34,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:34,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:34,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:34,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:34,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:34,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:34,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554694779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554694780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554694780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554694780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-25T17:10:34,845 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:34,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:34,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:34,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:34,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:34,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:34,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:34,998 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:35,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:35,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:35,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:35,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/43a92d559cb34602a2e33ce71c0f0a25 2024-11-25T17:10:35,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/71dfedee29e94ee68c875a68c3125c90 is 50, key is test_row_0/C:col10/1732554634136/Put/seqid=0 2024-11-25T17:10:35,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-25T17:10:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742497_1673 (size=12301) 2024-11-25T17:10:35,167 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:35,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:35,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:35,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:35,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/71dfedee29e94ee68c875a68c3125c90 2024-11-25T17:10:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3628d08c58ac47658cdb3e1d7cfa318e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3628d08c58ac47658cdb3e1d7cfa318e 2024-11-25T17:10:35,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3628d08c58ac47658cdb3e1d7cfa318e, entries=200, sequenceid=335, filesize=39.0 K 2024-11-25T17:10:35,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/43a92d559cb34602a2e33ce71c0f0a25 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/43a92d559cb34602a2e33ce71c0f0a25 2024-11-25T17:10:35,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/43a92d559cb34602a2e33ce71c0f0a25, entries=150, sequenceid=335, filesize=12.0 K 2024-11-25T17:10:35,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/71dfedee29e94ee68c875a68c3125c90 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/71dfedee29e94ee68c875a68c3125c90 2024-11-25T17:10:35,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/71dfedee29e94ee68c875a68c3125c90, entries=150, sequenceid=335, filesize=12.0 K 2024-11-25T17:10:35,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for c2213e06f0c6c3750162aafa4b26c5ef in 1107ms, sequenceid=335, compaction requested=true 2024-11-25T17:10:35,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:35,243 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:35,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:35,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:35,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:10:35,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:35,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-25T17:10:35,243 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:35,244 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:35,257 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:35,257 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:35,258 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:35,258 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/45def14e4d034a6f8042d57519a23bc6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b04ff3e5c20242609c5a34d71555b0d4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b3a13625e09c4926a63f5a66bf5e18fd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/71dfedee29e94ee68c875a68c3125c90] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=48.7 K 2024-11-25T17:10:35,258 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:35,258 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:35,258 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,258 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/42e9bcb6ec5b4a37b0753c545164f519, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c05db277e4c0418fa597315c4397f791, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f5158eb2fe9a450bba3214d0c594d222, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3628d08c58ac47658cdb3e1d7cfa318e] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=139.7 K 2024-11-25T17:10:35,258 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,258 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/42e9bcb6ec5b4a37b0753c545164f519, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c05db277e4c0418fa597315c4397f791, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f5158eb2fe9a450bba3214d0c594d222, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3628d08c58ac47658cdb3e1d7cfa318e] 2024-11-25T17:10:35,259 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42e9bcb6ec5b4a37b0753c545164f519, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732554631386 2024-11-25T17:10:35,259 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 45def14e4d034a6f8042d57519a23bc6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732554631386 2024-11-25T17:10:35,259 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b04ff3e5c20242609c5a34d71555b0d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732554632015 2024-11-25T17:10:35,259 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c05db277e4c0418fa597315c4397f791, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732554632015 2024-11-25T17:10:35,260 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b3a13625e09c4926a63f5a66bf5e18fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554633192 2024-11-25T17:10:35,260 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5158eb2fe9a450bba3214d0c594d222, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732554633192 2024-11-25T17:10:35,260 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 71dfedee29e94ee68c875a68c3125c90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732554633825 2024-11-25T17:10:35,261 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3628d08c58ac47658cdb3e1d7cfa318e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732554633825 2024-11-25T17:10:35,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:35,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-25T17:10:35,296 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#571 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:35,297 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/fe97a8d3166643ff9a4b92a56b1b0533 is 50, key is test_row_0/C:col10/1732554634136/Put/seqid=0 2024-11-25T17:10:35,298 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:35,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:35,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:35,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:35,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:35,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:35,301 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:35,333 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:35,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:35,335 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125002d916f1c44437aac2b6ea0a0472621_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:35,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742498_1674 (size=13051) 2024-11-25T17:10:35,337 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125002d916f1c44437aac2b6ea0a0472621_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:35,338 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125002d916f1c44437aac2b6ea0a0472621_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:35,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554695324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554695336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554695339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554695339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112567f9f596746f4e5b957e8599d5e29ff4_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554634161/Put/seqid=0 2024-11-25T17:10:35,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742499_1675 (size=4469) 2024-11-25T17:10:35,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742500_1676 (size=12454) 2024-11-25T17:10:35,439 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:35,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554695440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,449 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112567f9f596746f4e5b957e8599d5e29ff4_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112567f9f596746f4e5b957e8599d5e29ff4_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:35,450 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4f4c8f14ad0e4447aa622a3310be02ee, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:35,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4f4c8f14ad0e4447aa622a3310be02ee is 175, key is test_row_0/A:col10/1732554634161/Put/seqid=0 2024-11-25T17:10:35,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554695454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554695454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554695454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742501_1677 (size=31255) 2024-11-25T17:10:35,463 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4f4c8f14ad0e4447aa622a3310be02ee 2024-11-25T17:10:35,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/b29b52d37e4b4632b1fd6cef1d8c6ffb is 50, key is test_row_0/B:col10/1732554634161/Put/seqid=0 2024-11-25T17:10:35,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742502_1678 (size=12301) 2024-11-25T17:10:35,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/b29b52d37e4b4632b1fd6cef1d8c6ffb 2024-11-25T17:10:35,492 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:35,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:35,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:35,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:35,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
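
The pid=176 cycle above (FlushRegionCallable starts, finds the region "already flushing", fails with "Unable to complete flush", and the master re-dispatches the procedure) is the server side of a requested table flush. A minimal sketch of how such a flush can be requested from a client, assuming the HBase 2.x Admin API; the table name is taken from the log, and the link to this particular procedure is an assumption, not confirmed by the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of all regions of the table. The FLUSH_REGIONS procedure
      // seen above (pid=176) is presumably the server-side counterpart of a request
      // like this; while the region is still mid-flush, the per-region callable
      // fails and is retried, which is the repeated pid=176 pattern in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
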
2024-11-25T17:10:35,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/e0f56b019aa649c79e1698e5e7200504 is 50, key is test_row_0/C:col10/1732554634161/Put/seqid=0 2024-11-25T17:10:35,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742503_1679 (size=12301) 2024-11-25T17:10:35,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554695562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,566 DEBUG [Thread-2580 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:10:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 
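
The RegionTooBusyException ("Over memstore limit=512.0 K") comes from HRegion.checkResources, which rejects writes once a region's memstore exceeds its blocking size (hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier); the client's RpcRetryingCallerImpl then backs off and retries, as the "tries=6, retries=16" entry shows. A minimal, hedged sketch of the relevant knobs, with illustrative values rather than this test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstorePressureConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Server side: writes are rejected with RegionTooBusyException once a region's
    // memstore grows past flush.size * block.multiplier (values shown are the stock
    // defaults, not the small limits this test evidently uses).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Client side: RpcRetryingCallerImpl retries the put with backoff, as in
    // "tries=6, retries=16" above.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // base backoff in milliseconds
    return conf;
  }
}
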
2024-11-25T17:10:35,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554695645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:35,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:35,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:35,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554695656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554695657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554695657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,746 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/fe97a8d3166643ff9a4b92a56b1b0533 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe97a8d3166643ff9a4b92a56b1b0533 2024-11-25T17:10:35,754 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into fe97a8d3166643ff9a4b92a56b1b0533(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:35,754 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:35,754 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=12, startTime=1732554635243; duration=0sec 2024-11-25T17:10:35,754 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:35,754 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:35,754 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:35,756 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:35,756 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:35,756 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,756 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/467568364aa64c399e105de36d6f1993, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ef0ac4b22aca4ee2a94f4315f2a801a2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ae69b247400c49b7bee317db20d62dec, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/43a92d559cb34602a2e33ce71c0f0a25] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=48.7 K 2024-11-25T17:10:35,756 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 467568364aa64c399e105de36d6f1993, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732554631386 2024-11-25T17:10:35,757 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ef0ac4b22aca4ee2a94f4315f2a801a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732554632015 2024-11-25T17:10:35,757 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting ae69b247400c49b7bee317db20d62dec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=318, earliestPutTs=1732554633192 2024-11-25T17:10:35,757 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 43a92d559cb34602a2e33ce71c0f0a25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732554633825 2024-11-25T17:10:35,770 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#576 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:35,770 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/7690dbeb912042e5b3b8a55374bba499 is 50, key is test_row_0/B:col10/1732554634136/Put/seqid=0 2024-11-25T17:10:35,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742504_1680 (size=13051) 2024-11-25T17:10:35,787 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/7690dbeb912042e5b3b8a55374bba499 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7690dbeb912042e5b3b8a55374bba499 2024-11-25T17:10:35,791 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 7690dbeb912042e5b3b8a55374bba499(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:35,791 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:35,791 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=12, startTime=1732554635243; duration=0sec 2024-11-25T17:10:35,791 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:35,791 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:35,801 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:35,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:35,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:35,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:35,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,820 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#572 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:35,821 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3193e065328a408eaacf40f2d5ce14ac is 175, key is test_row_0/A:col10/1732554634136/Put/seqid=0 2024-11-25T17:10:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742505_1681 (size=32005) 2024-11-25T17:10:35,830 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3193e065328a408eaacf40f2d5ce14ac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3193e065328a408eaacf40f2d5ce14ac 2024-11-25T17:10:35,834 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into 3193e065328a408eaacf40f2d5ce14ac(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:35,834 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:35,834 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=12, startTime=1732554635243; duration=0sec 2024-11-25T17:10:35,834 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:35,834 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:35,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/e0f56b019aa649c79e1698e5e7200504 2024-11-25T17:10:35,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/4f4c8f14ad0e4447aa622a3310be02ee as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4f4c8f14ad0e4447aa622a3310be02ee 2024-11-25T17:10:35,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:35,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554695949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:35,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4f4c8f14ad0e4447aa622a3310be02ee, entries=150, sequenceid=358, filesize=30.5 K 2024-11-25T17:10:35,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/b29b52d37e4b4632b1fd6cef1d8c6ffb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b29b52d37e4b4632b1fd6cef1d8c6ffb 2024-11-25T17:10:35,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:35,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:35,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:35,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:35,958 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b29b52d37e4b4632b1fd6cef1d8c6ffb, entries=150, sequenceid=358, filesize=12.0 K 2024-11-25T17:10:35,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/e0f56b019aa649c79e1698e5e7200504 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e0f56b019aa649c79e1698e5e7200504 2024-11-25T17:10:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:35,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e0f56b019aa649c79e1698e5e7200504, entries=150, sequenceid=358, filesize=12.0 K 2024-11-25T17:10:35,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c2213e06f0c6c3750162aafa4b26c5ef in 672ms, sequenceid=358, compaction requested=false 2024-11-25T17:10:35,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:35,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:35,975 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-25T17:10:35,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:35,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:35,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:35,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:35,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 
2024-11-25T17:10:35,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:36,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125f9ccc7622db54b7b85ea019807909624_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554635334/Put/seqid=0 2024-11-25T17:10:36,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742506_1682 (size=14994) 2024-11-25T17:10:36,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554696041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554696042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554696046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:36,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:36,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:36,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:36,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:36,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554696147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554696147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554696149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,269 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:36,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:36,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:36,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554696352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554696353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554696354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,410 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:36,414 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125f9ccc7622db54b7b85ea019807909624_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f9ccc7622db54b7b85ea019807909624_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:36,415 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/639884beda844163926c583adfb7a145, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:36,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/639884beda844163926c583adfb7a145 is 175, key is test_row_0/A:col10/1732554635334/Put/seqid=0 2024-11-25T17:10:36,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:36,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:36,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
as already flushing 2024-11-25T17:10:36,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:36,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742507_1683 (size=39949) 2024-11-25T17:10:36,439 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=375, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/639884beda844163926c583adfb7a145 2024-11-25T17:10:36,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/a4ab0d0475f34e12848d89e5e1ee674c is 50, key is test_row_0/B:col10/1732554635334/Put/seqid=0 2024-11-25T17:10:36,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554696452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742508_1684 (size=12301) 2024-11-25T17:10:36,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/a4ab0d0475f34e12848d89e5e1ee674c 2024-11-25T17:10:36,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/530898b084f0490c91d2f5260fbb6b34 is 50, key is test_row_0/C:col10/1732554635334/Put/seqid=0 2024-11-25T17:10:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742509_1685 (size=12301) 2024-11-25T17:10:36,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/530898b084f0490c91d2f5260fbb6b34 2024-11-25T17:10:36,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/639884beda844163926c583adfb7a145 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/639884beda844163926c583adfb7a145 2024-11-25T17:10:36,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/639884beda844163926c583adfb7a145, entries=200, sequenceid=375, filesize=39.0 K 2024-11-25T17:10:36,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/a4ab0d0475f34e12848d89e5e1ee674c as 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a4ab0d0475f34e12848d89e5e1ee674c 2024-11-25T17:10:36,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a4ab0d0475f34e12848d89e5e1ee674c, entries=150, sequenceid=375, filesize=12.0 K 2024-11-25T17:10:36,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/530898b084f0490c91d2f5260fbb6b34 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/530898b084f0490c91d2f5260fbb6b34 2024-11-25T17:10:36,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/530898b084f0490c91d2f5260fbb6b34, entries=150, sequenceid=375, filesize=12.0 K 2024-11-25T17:10:36,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c2213e06f0c6c3750162aafa4b26c5ef in 524ms, sequenceid=375, compaction requested=true 2024-11-25T17:10:36,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:36,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:36,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:36,499 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:36,499 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:36,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:36,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:36,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:36,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:36,500 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:36,500 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:36,500 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:36,500 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:36,500 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,500 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,500 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7690dbeb912042e5b3b8a55374bba499, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b29b52d37e4b4632b1fd6cef1d8c6ffb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a4ab0d0475f34e12848d89e5e1ee674c] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.8 K 2024-11-25T17:10:36,500 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3193e065328a408eaacf40f2d5ce14ac, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4f4c8f14ad0e4447aa622a3310be02ee, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/639884beda844163926c583adfb7a145] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=100.8 K 2024-11-25T17:10:36,500 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,500 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3193e065328a408eaacf40f2d5ce14ac, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4f4c8f14ad0e4447aa622a3310be02ee, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/639884beda844163926c583adfb7a145] 2024-11-25T17:10:36,500 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 7690dbeb912042e5b3b8a55374bba499, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732554633825 2024-11-25T17:10:36,501 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3193e065328a408eaacf40f2d5ce14ac, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732554633825 2024-11-25T17:10:36,501 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f4c8f14ad0e4447aa622a3310be02ee, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732554634161 2024-11-25T17:10:36,501 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b29b52d37e4b4632b1fd6cef1d8c6ffb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732554634161 2024-11-25T17:10:36,508 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a4ab0d0475f34e12848d89e5e1ee674c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732554635322 2024-11-25T17:10:36,508 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 639884beda844163926c583adfb7a145, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732554635322 2024-11-25T17:10:36,548 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#580 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:36,548 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/a7d45af9308d4bba8d159f847a3c1bdc is 50, key is test_row_0/B:col10/1732554635334/Put/seqid=0 2024-11-25T17:10:36,553 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:36,557 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125cb11be6a72bd48969c478ab2ba81b313_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:36,559 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125cb11be6a72bd48969c478ab2ba81b313_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:36,559 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125cb11be6a72bd48969c478ab2ba81b313_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:36,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742511_1687 (size=4469) 2024-11-25T17:10:36,574 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:36,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-25T17:10:36,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,575 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#581 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:36,575 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:10:36,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:36,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:36,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742510_1686 (size=13153) 2024-11-25T17:10:36,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:36,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:36,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:36,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:36,576 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a2eaaac2005845f0acb9d4c9c432f3fa is 175, key is test_row_0/A:col10/1732554635334/Put/seqid=0 2024-11-25T17:10:36,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125fc9e05c6fc1d4830a010626f87fa1ba2_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554636006/Put/seqid=0 2024-11-25T17:10:36,585 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/a7d45af9308d4bba8d159f847a3c1bdc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a7d45af9308d4bba8d159f847a3c1bdc 2024-11-25T17:10:36,590 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into a7d45af9308d4bba8d159f847a3c1bdc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:36,590 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:36,590 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=13, startTime=1732554636499; duration=0sec 2024-11-25T17:10:36,591 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:36,591 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:36,591 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:36,592 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:36,592 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:36,592 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:36,592 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe97a8d3166643ff9a4b92a56b1b0533, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e0f56b019aa649c79e1698e5e7200504, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/530898b084f0490c91d2f5260fbb6b34] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.8 K 2024-11-25T17:10:36,592 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting fe97a8d3166643ff9a4b92a56b1b0533, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732554633825 2024-11-25T17:10:36,593 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting e0f56b019aa649c79e1698e5e7200504, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732554634161 2024-11-25T17:10:36,593 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 530898b084f0490c91d2f5260fbb6b34, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732554635322 2024-11-25T17:10:36,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 
is added to blk_1073742512_1688 (size=32107) 2024-11-25T17:10:36,621 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a2eaaac2005845f0acb9d4c9c432f3fa as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a2eaaac2005845f0acb9d4c9c432f3fa 2024-11-25T17:10:36,627 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into a2eaaac2005845f0acb9d4c9c432f3fa(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:36,627 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:36,627 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=13, startTime=1732554636499; duration=0sec 2024-11-25T17:10:36,627 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:36,627 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:36,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742513_1689 (size=12454) 2024-11-25T17:10:36,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:36,635 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125fc9e05c6fc1d4830a010626f87fa1ba2_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125fc9e05c6fc1d4830a010626f87fa1ba2_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:36,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3f3b2d91ab8c4b2eb852c1ba65b5d407, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:36,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3f3b2d91ab8c4b2eb852c1ba65b5d407 is 175, key is test_row_0/A:col10/1732554636006/Put/seqid=0 2024-11-25T17:10:36,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-25T17:10:36,648 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#583 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:36,649 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/f5c1a59980d347aeae0e35724e3ec299 is 50, key is test_row_0/C:col10/1732554635334/Put/seqid=0 2024-11-25T17:10:36,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742514_1690 (size=31255) 2024-11-25T17:10:36,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:36,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:36,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742515_1691 (size=13153) 2024-11-25T17:10:36,668 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/f5c1a59980d347aeae0e35724e3ec299 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f5c1a59980d347aeae0e35724e3ec299 2024-11-25T17:10:36,673 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into f5c1a59980d347aeae0e35724e3ec299(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:36,673 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:36,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554696671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,674 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=13, startTime=1732554636499; duration=0sec 2024-11-25T17:10:36,674 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:36,675 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:36,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554696672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554696674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554696776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554696782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554696789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554696982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554696986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:36,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554696993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,058 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3f3b2d91ab8c4b2eb852c1ba65b5d407 2024-11-25T17:10:37,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/b0b4eec7bf9f402caa56b502b37c809a is 50, key is test_row_0/B:col10/1732554636006/Put/seqid=0 2024-11-25T17:10:37,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742516_1692 (size=12301) 2024-11-25T17:10:37,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554697286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554697292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554697296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554697457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,528 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/b0b4eec7bf9f402caa56b502b37c809a 2024-11-25T17:10:37,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/055e6cad7656445cabacd176de97ce8e is 50, key is test_row_0/C:col10/1732554636006/Put/seqid=0 2024-11-25T17:10:37,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742517_1693 (size=12301) 2024-11-25T17:10:37,581 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/055e6cad7656445cabacd176de97ce8e 2024-11-25T17:10:37,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/3f3b2d91ab8c4b2eb852c1ba65b5d407 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3f3b2d91ab8c4b2eb852c1ba65b5d407 2024-11-25T17:10:37,598 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3f3b2d91ab8c4b2eb852c1ba65b5d407, entries=150, sequenceid=397, filesize=30.5 K 2024-11-25T17:10:37,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/b0b4eec7bf9f402caa56b502b37c809a as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b0b4eec7bf9f402caa56b502b37c809a 2024-11-25T17:10:37,605 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b0b4eec7bf9f402caa56b502b37c809a, entries=150, sequenceid=397, filesize=12.0 K 2024-11-25T17:10:37,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/055e6cad7656445cabacd176de97ce8e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055e6cad7656445cabacd176de97ce8e 2024-11-25T17:10:37,610 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055e6cad7656445cabacd176de97ce8e, entries=150, sequenceid=397, filesize=12.0 K 2024-11-25T17:10:37,611 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for c2213e06f0c6c3750162aafa4b26c5ef in 1036ms, sequenceid=397, compaction requested=false 2024-11-25T17:10:37,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:37,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:37,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-25T17:10:37,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-25T17:10:37,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-25T17:10:37,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0730 sec 2024-11-25T17:10:37,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 3.0780 sec 2024-11-25T17:10:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:37,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-25T17:10:37,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:37,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:37,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:37,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:37,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:37,795 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:37,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125844f3251d6cb47a08e6dae9b81526bff_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554636666/Put/seqid=0 2024-11-25T17:10:37,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742518_1694 (size=12454) 2024-11-25T17:10:37,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554697819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554697819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554697819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554697923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554697927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:37,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:37,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554697927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:38,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554698129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:38,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554698129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:38,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554698131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:38,219 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:38,223 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125844f3251d6cb47a08e6dae9b81526bff_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125844f3251d6cb47a08e6dae9b81526bff_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:38,224 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/b945217cccfb4c04bac1b7a7b2e1007d, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:38,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/b945217cccfb4c04bac1b7a7b2e1007d is 175, key is test_row_0/A:col10/1732554636666/Put/seqid=0 2024-11-25T17:10:38,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742519_1695 (size=31255) 2024-11-25T17:10:38,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554698432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:38,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:38,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554698433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:38,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554698435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:38,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-25T17:10:38,648 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-25T17:10:38,650 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:38,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-25T17:10:38,651 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:38,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-25T17:10:38,652 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:38,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:38,682 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=416, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/b945217cccfb4c04bac1b7a7b2e1007d 2024-11-25T17:10:38,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/592ab8ece1b14cc882641ddb4a6481de is 50, key is test_row_0/B:col10/1732554636666/Put/seqid=0 2024-11-25T17:10:38,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742520_1696 (size=12301) 2024-11-25T17:10:38,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/592ab8ece1b14cc882641ddb4a6481de 2024-11-25T17:10:38,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/525ceba6db184a1ab502bdeb9a3aa26b is 50, key is test_row_0/C:col10/1732554636666/Put/seqid=0 2024-11-25T17:10:38,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-25T17:10:38,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742521_1697 (size=12301) 2024-11-25T17:10:38,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/525ceba6db184a1ab502bdeb9a3aa26b 2024-11-25T17:10:38,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/b945217cccfb4c04bac1b7a7b2e1007d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b945217cccfb4c04bac1b7a7b2e1007d 2024-11-25T17:10:38,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b945217cccfb4c04bac1b7a7b2e1007d, entries=150, sequenceid=416, filesize=30.5 K 2024-11-25T17:10:38,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/592ab8ece1b14cc882641ddb4a6481de as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/592ab8ece1b14cc882641ddb4a6481de 2024-11-25T17:10:38,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/592ab8ece1b14cc882641ddb4a6481de, entries=150, sequenceid=416, filesize=12.0 K 2024-11-25T17:10:38,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/525ceba6db184a1ab502bdeb9a3aa26b as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/525ceba6db184a1ab502bdeb9a3aa26b 2024-11-25T17:10:38,803 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:38,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-25T17:10:38,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:38,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:38,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:38,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:38,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:38,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:38,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/525ceba6db184a1ab502bdeb9a3aa26b, entries=150, sequenceid=416, filesize=12.0 K 2024-11-25T17:10:38,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for c2213e06f0c6c3750162aafa4b26c5ef in 1036ms, sequenceid=416, compaction requested=true 2024-11-25T17:10:38,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:38,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:38,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:38,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:38,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:38,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:38,831 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:38,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:10:38,831 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:38,841 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:38,841 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:38,842 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:38,842 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a7d45af9308d4bba8d159f847a3c1bdc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b0b4eec7bf9f402caa56b502b37c809a, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/592ab8ece1b14cc882641ddb4a6481de] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.9 K 2024-11-25T17:10:38,842 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94617 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:38,842 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:38,842 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:38,842 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a2eaaac2005845f0acb9d4c9c432f3fa, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3f3b2d91ab8c4b2eb852c1ba65b5d407, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b945217cccfb4c04bac1b7a7b2e1007d] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=92.4 K 2024-11-25T17:10:38,842 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:38,842 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting a7d45af9308d4bba8d159f847a3c1bdc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732554635322 2024-11-25T17:10:38,842 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a2eaaac2005845f0acb9d4c9c432f3fa, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3f3b2d91ab8c4b2eb852c1ba65b5d407, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b945217cccfb4c04bac1b7a7b2e1007d] 2024-11-25T17:10:38,845 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2eaaac2005845f0acb9d4c9c432f3fa, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732554635322 2024-11-25T17:10:38,845 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting b0b4eec7bf9f402caa56b502b37c809a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732554636006 2024-11-25T17:10:38,846 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f3b2d91ab8c4b2eb852c1ba65b5d407, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732554636006 2024-11-25T17:10:38,846 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 592ab8ece1b14cc882641ddb4a6481de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732554636666 2024-11-25T17:10:38,846 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b945217cccfb4c04bac1b7a7b2e1007d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732554636666 2024-11-25T17:10:38,888 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:38,895 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#590 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:38,896 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/6ed1a116f40746bf9eff7840e49cb4d4 is 50, key is test_row_0/B:col10/1732554636666/Put/seqid=0 2024-11-25T17:10:38,897 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125b3ae72f284074f1db3f324193b356f30_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:38,899 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125b3ae72f284074f1db3f324193b356f30_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:38,899 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b3ae72f284074f1db3f324193b356f30_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:38,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742522_1698 (size=4469) 2024-11-25T17:10:38,928 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#589 average throughput is 0.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:38,928 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/d8395edac19f4c75b1e2eb389b8306fb is 175, key is test_row_0/A:col10/1732554636666/Put/seqid=0 2024-11-25T17:10:38,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742523_1699 (size=13255) 2024-11-25T17:10:38,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:38,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-25T17:10:38,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:38,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:38,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:38,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:38,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:38,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-25T17:10:38,960 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:38,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742524_1700 (size=32209) 2024-11-25T17:10:38,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-25T17:10:38,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:38,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:38,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:38,962 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:38,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:38,963 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/6ed1a116f40746bf9eff7840e49cb4d4 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6ed1a116f40746bf9eff7840e49cb4d4 2024-11-25T17:10:38,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125d9b407f607474346a6cd9064b3738519_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554638943/Put/seqid=0 2024-11-25T17:10:38,977 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/d8395edac19f4c75b1e2eb389b8306fb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/d8395edac19f4c75b1e2eb389b8306fb 2024-11-25T17:10:38,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742525_1701 (size=12454) 2024-11-25T17:10:39,004 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,007 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125d9b407f607474346a6cd9064b3738519_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125d9b407f607474346a6cd9064b3738519_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:39,008 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6e9945cc8adc40f9a79a3b90cefd366f, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:39,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6e9945cc8adc40f9a79a3b90cefd366f is 175, key is test_row_0/A:col10/1732554638943/Put/seqid=0 2024-11-25T17:10:39,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742526_1702 (size=31255) 2024-11-25T17:10:39,024 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=440, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6e9945cc8adc40f9a79a3b90cefd366f 2024-11-25T17:10:39,031 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 6ed1a116f40746bf9eff7840e49cb4d4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:39,031 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into d8395edac19f4c75b1e2eb389b8306fb(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:39,031 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:39,031 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:39,031 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=13, startTime=1732554638831; duration=0sec 2024-11-25T17:10:39,031 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=13, startTime=1732554638831; duration=0sec 2024-11-25T17:10:39,032 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:39,032 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:39,032 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:39,032 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:39,032 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:39,032 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:39,033 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:39,033 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:39,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3019da99338f4bd790dd674752685cfd is 50, key is test_row_0/B:col10/1732554638943/Put/seqid=0 2024-11-25T17:10:39,033 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f5c1a59980d347aeae0e35724e3ec299, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055e6cad7656445cabacd176de97ce8e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/525ceba6db184a1ab502bdeb9a3aa26b] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=36.9 K 2024-11-25T17:10:39,033 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting f5c1a59980d347aeae0e35724e3ec299, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732554635322 2024-11-25T17:10:39,034 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 055e6cad7656445cabacd176de97ce8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732554636006 2024-11-25T17:10:39,034 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 525ceba6db184a1ab502bdeb9a3aa26b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732554636666 2024-11-25T17:10:39,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554699032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554699034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554699036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742527_1703 (size=12301) 2024-11-25T17:10:39,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3019da99338f4bd790dd674752685cfd 2024-11-25T17:10:39,051 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#593 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:39,051 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/5045fe9245b3402396e48371e56cb47e is 50, key is test_row_0/C:col10/1732554636666/Put/seqid=0 2024-11-25T17:10:39,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/e2ee196a3add4dfe9970e1a7c1f3c43d is 50, key is test_row_0/C:col10/1732554638943/Put/seqid=0 2024-11-25T17:10:39,116 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:39,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-25T17:10:39,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:39,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:39,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:39,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:39,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:39,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:39,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742528_1704 (size=13255) 2024-11-25T17:10:39,149 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/5045fe9245b3402396e48371e56cb47e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/5045fe9245b3402396e48371e56cb47e 2024-11-25T17:10:39,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554699150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554699150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554699150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742529_1705 (size=12301) 2024-11-25T17:10:39,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/e2ee196a3add4dfe9970e1a7c1f3c43d 2024-11-25T17:10:39,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/6e9945cc8adc40f9a79a3b90cefd366f as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6e9945cc8adc40f9a79a3b90cefd366f 2024-11-25T17:10:39,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6e9945cc8adc40f9a79a3b90cefd366f, entries=150, sequenceid=440, filesize=30.5 K 2024-11-25T17:10:39,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/3019da99338f4bd790dd674752685cfd as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3019da99338f4bd790dd674752685cfd 2024-11-25T17:10:39,165 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into 5045fe9245b3402396e48371e56cb47e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:39,165 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:39,165 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=13, startTime=1732554638831; duration=0sec 2024-11-25T17:10:39,165 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:39,165 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:39,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3019da99338f4bd790dd674752685cfd, entries=150, sequenceid=440, filesize=12.0 K 2024-11-25T17:10:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/e2ee196a3add4dfe9970e1a7c1f3c43d as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e2ee196a3add4dfe9970e1a7c1f3c43d 2024-11-25T17:10:39,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e2ee196a3add4dfe9970e1a7c1f3c43d, entries=150, sequenceid=440, filesize=12.0 K 2024-11-25T17:10:39,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c2213e06f0c6c3750162aafa4b26c5ef in 252ms, sequenceid=440, compaction requested=false 2024-11-25T17:10:39,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:39,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-25T17:10:39,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,273 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:39,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-25T17:10:39,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,273 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:39,273 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:10:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:39,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-25T17:10:39,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112574912ec812fc42308bc0543afb3e89e5_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554638986/Put/seqid=0 2024-11-25T17:10:39,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,298 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742530_1706 (size=9914) 2024-11-25T17:10:39,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,354 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112574912ec812fc42308bc0543afb3e89e5_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112574912ec812fc42308bc0543afb3e89e5_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:39,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7b5a6250ad8144549880ee93f40e2818, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:39,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:39,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:39,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7b5a6250ad8144549880ee93f40e2818 is 175, key is test_row_0/A:col10/1732554638986/Put/seqid=0 2024-11-25T17:10:39,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742531_1707 (size=22561) 2024-11-25T17:10:39,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:39,410 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=455, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7b5a6250ad8144549880ee93f40e2818 2024-11-25T17:10:39,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554699431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554699437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/06ac006704844de3ae459f6d8b659a83 is 50, key is test_row_0/B:col10/1732554638986/Put/seqid=0 2024-11-25T17:10:39,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554699438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45204 deadline: 1732554699464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,466 DEBUG [Thread-2578 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:10:39,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742532_1708 (size=9857) 2024-11-25T17:10:39,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554699539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554699549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554699550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45272 deadline: 1732554699579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,582 DEBUG [Thread-2580 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8188 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., hostname=6579369734b6,41865,1732554474464, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T17:10:39,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554699744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554699757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:39,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554699759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:39,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-25T17:10:39,870 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/06ac006704844de3ae459f6d8b659a83 2024-11-25T17:10:39,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/de144886405a43e69110bde0e92d5be2 is 50, key is test_row_0/C:col10/1732554638986/Put/seqid=0 2024-11-25T17:10:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742533_1709 (size=9857) 2024-11-25T17:10:39,907 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/de144886405a43e69110bde0e92d5be2 2024-11-25T17:10:39,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/7b5a6250ad8144549880ee93f40e2818 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7b5a6250ad8144549880ee93f40e2818 2024-11-25T17:10:39,915 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7b5a6250ad8144549880ee93f40e2818, entries=100, sequenceid=455, filesize=22.0 K 2024-11-25T17:10:39,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/06ac006704844de3ae459f6d8b659a83 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/06ac006704844de3ae459f6d8b659a83 2024-11-25T17:10:39,928 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/06ac006704844de3ae459f6d8b659a83, entries=100, sequenceid=455, filesize=9.6 K 2024-11-25T17:10:39,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/de144886405a43e69110bde0e92d5be2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de144886405a43e69110bde0e92d5be2 2024-11-25T17:10:39,948 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de144886405a43e69110bde0e92d5be2, entries=100, sequenceid=455, filesize=9.6 K 2024-11-25T17:10:39,948 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c2213e06f0c6c3750162aafa4b26c5ef in 675ms, sequenceid=455, compaction requested=true 2024-11-25T17:10:39,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:39,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
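Illustrative aside, not part of the captured test output: the burst of RegionTooBusyException warnings above is the region server's write-blocking path, where HRegion.checkResources rejects mutations once the region memstore exceeds its blocking limit (reported here as 512.0 K) while the flush shown finishing above drains it, and the client, as the RpcRetryingCallerImpl entry shows, keeps retrying with backoff. The minimal Java sketch below is an assumed example of how a writer thread could react to that exception; the table, row, family and qualifier names are taken from the log entries, the numeric values are assumptions, and the sizing comment only restates the usual flush-size times block-multiplier relationship, which this excerpt does not confirm.

// Minimal illustrative sketch (assumed, not from the test source): reacting to
// RegionTooBusyException on the client while the server flushes the region.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackoffSketch {
  // Server-side sizing (hbase-site.xml on the region server), shown only to make the
  // 512.0 K figure traceable: blocking limit = hbase.hregion.memstore.flush.size
  // * hbase.hregion.memstore.block.multiplier, e.g. 128 KB * 4 = 512 KB (assumed values).
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // the HBase client itself also retries via RpcRetryingCallerImpl
          break;
        } catch (IOException e) {
          // Depending on client retry settings the RegionTooBusyException can surface
          // directly or as the cause of a retries-exhausted failure.
          if (e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException) {
            Thread.sleep(backoffMs);          // give the in-flight flush time to drain the memstore
            backoffMs = Math.min(backoffMs * 2, 5000);
          } else {
            throw e;
          }
        }
      }
    }
  }
}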
2024-11-25T17:10:39,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-25T17:10:39,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-25T17:10:39,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-25T17:10:39,952 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2980 sec 2024-11-25T17:10:39,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.3020 sec 2024-11-25T17:10:40,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:40,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:10:40,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:40,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:40,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:40,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411250c3ebda1c6554cefbb04e863c09cd1d4_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554639436/Put/seqid=0 2024-11-25T17:10:40,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742534_1710 (size=14994) 2024-11-25T17:10:40,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554700075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554700075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554700076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554700181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554700181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554700181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554700386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554700393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554700394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,478 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,513 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411250c3ebda1c6554cefbb04e863c09cd1d4_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411250c3ebda1c6554cefbb04e863c09cd1d4_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:40,522 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a115349417854f39a6cbbd4796b70a49, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:40,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a115349417854f39a6cbbd4796b70a49 is 175, key is test_row_0/A:col10/1732554639436/Put/seqid=0 2024-11-25T17:10:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742535_1711 (size=39949) 2024-11-25T17:10:40,547 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=480, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a115349417854f39a6cbbd4796b70a49 2024-11-25T17:10:40,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1068d45459dc4f339f14e3284b199371 is 50, key is test_row_0/B:col10/1732554639436/Put/seqid=0 2024-11-25T17:10:40,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742536_1712 
(size=12301) 2024-11-25T17:10:40,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1068d45459dc4f339f14e3284b199371 2024-11-25T17:10:40,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/91adc89c79094fcbb4d322fb86edd6f9 is 50, key is test_row_0/C:col10/1732554639436/Put/seqid=0 2024-11-25T17:10:40,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742537_1713 (size=12301) 2024-11-25T17:10:40,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/91adc89c79094fcbb4d322fb86edd6f9 2024-11-25T17:10:40,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/a115349417854f39a6cbbd4796b70a49 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a115349417854f39a6cbbd4796b70a49 2024-11-25T17:10:40,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a115349417854f39a6cbbd4796b70a49, entries=200, sequenceid=480, filesize=39.0 K 2024-11-25T17:10:40,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/1068d45459dc4f339f14e3284b199371 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1068d45459dc4f339f14e3284b199371 2024-11-25T17:10:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1068d45459dc4f339f14e3284b199371, entries=150, sequenceid=480, filesize=12.0 K 2024-11-25T17:10:40,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/91adc89c79094fcbb4d322fb86edd6f9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/91adc89c79094fcbb4d322fb86edd6f9 2024-11-25T17:10:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554700696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,707 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/91adc89c79094fcbb4d322fb86edd6f9, entries=150, sequenceid=480, filesize=12.0 K 2024-11-25T17:10:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c2213e06f0c6c3750162aafa4b26c5ef in 658ms, sequenceid=480, compaction requested=true 2024-11-25T17:10:40,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:40,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:40,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:40,708 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:40,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:40,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:40,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:40,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-25T17:10:40,709 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:40,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,710 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125974 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:40,710 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:40,710 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:40,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,710 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/d8395edac19f4c75b1e2eb389b8306fb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6e9945cc8adc40f9a79a3b90cefd366f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7b5a6250ad8144549880ee93f40e2818, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a115349417854f39a6cbbd4796b70a49] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=123.0 K 2024-11-25T17:10:40,710 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:40,710 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/d8395edac19f4c75b1e2eb389b8306fb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6e9945cc8adc40f9a79a3b90cefd366f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7b5a6250ad8144549880ee93f40e2818, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a115349417854f39a6cbbd4796b70a49] 2024-11-25T17:10:40,710 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47714 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:40,710 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8395edac19f4c75b1e2eb389b8306fb, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732554636666 2024-11-25T17:10:40,710 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:40,710 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:40,711 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6ed1a116f40746bf9eff7840e49cb4d4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3019da99338f4bd790dd674752685cfd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/06ac006704844de3ae459f6d8b659a83, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1068d45459dc4f339f14e3284b199371] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=46.6 K 2024-11-25T17:10:40,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,711 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e9945cc8adc40f9a79a3b90cefd366f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732554637814 2024-11-25T17:10:40,711 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ed1a116f40746bf9eff7840e49cb4d4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732554636666 2024-11-25T17:10:40,711 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,711 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b5a6250ad8144549880ee93f40e2818, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732554638963 2024-11-25T17:10:40,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,711 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 3019da99338f4bd790dd674752685cfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732554637814 2024-11-25T17:10:40,711 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting a115349417854f39a6cbbd4796b70a49, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732554639429 2024-11-25T17:10:40,712 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 06ac006704844de3ae459f6d8b659a83, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732554638963 2024-11-25T17:10:40,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,712 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1068d45459dc4f339f14e3284b199371, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732554639429 2024-11-25T17:10:40,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:40,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-25T17:10:40,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:40,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:40,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:40,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,720 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,723 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411253267fdd5dd7645538d12f5434d7881ba_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:40,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b39ab163336848d697edbf4fdbd1d5a5_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554640066/Put/seqid=0 2024-11-25T17:10:40,724 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#602 average throughput is 
6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:40,725 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/200d176741aa427a95ec32599794f68e is 50, key is test_row_0/B:col10/1732554639436/Put/seqid=0 2024-11-25T17:10:40,726 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411253267fdd5dd7645538d12f5434d7881ba_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:40,726 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411253267fdd5dd7645538d12f5434d7881ba_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:40,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742538_1714 (size=12454) 2024-11-25T17:10:40,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,729 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,729 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742539_1715 (size=13391) 2024-11-25T17:10:40,733 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125b39ab163336848d697edbf4fdbd1d5a5_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b39ab163336848d697edbf4fdbd1d5a5_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:40,734 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c06c7a0472754d288b6de2df7cc387fe, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:40,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c06c7a0472754d288b6de2df7cc387fe is 175, key is test_row_0/A:col10/1732554640066/Put/seqid=0 2024-11-25T17:10:40,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742540_1716 (size=4469) 2024-11-25T17:10:40,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,736 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#601 average throughput is 1.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:40,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,737 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/b4b0a2e4815541beb67345a81360a603 is 175, key is test_row_0/A:col10/1732554639436/Put/seqid=0 2024-11-25T17:10:40,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742541_1717 (size=31255) 2024-11-25T17:10:40,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,742 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=492, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c06c7a0472754d288b6de2df7cc387fe 2024-11-25T17:10:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:40,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742542_1718 (size=32345) 2024-11-25T17:10:40,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/300e7e5223d640a988ec3737677867b8 is 50, key is test_row_0/B:col10/1732554640066/Put/seqid=0 2024-11-25T17:10:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-25T17:10:40,763 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-25T17:10:40,765 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/b4b0a2e4815541beb67345a81360a603 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b4b0a2e4815541beb67345a81360a603 2024-11-25T17:10:40,768 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-25T17:10:40,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-11-25T17:10:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-25T17:10:40,772 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-25T17:10:40,772 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into b4b0a2e4815541beb67345a81360a603(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:40,772 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:40,772 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=12, startTime=1732554640708; duration=0sec 2024-11-25T17:10:40,772 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:40,772 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:40,772 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-25T17:10:40,772 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T17:10:40,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T17:10:40,774 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47714 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-25T17:10:40,774 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:40,774 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:40,774 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/5045fe9245b3402396e48371e56cb47e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e2ee196a3add4dfe9970e1a7c1f3c43d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de144886405a43e69110bde0e92d5be2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/91adc89c79094fcbb4d322fb86edd6f9] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=46.6 K 2024-11-25T17:10:40,775 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5045fe9245b3402396e48371e56cb47e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732554636666 2024-11-25T17:10:40,775 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2ee196a3add4dfe9970e1a7c1f3c43d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1732554637814 2024-11-25T17:10:40,775 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting de144886405a43e69110bde0e92d5be2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1732554638963 2024-11-25T17:10:40,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742543_1719 (size=12301) 2024-11-25T17:10:40,776 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91adc89c79094fcbb4d322fb86edd6f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732554639429 2024-11-25T17:10:40,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/300e7e5223d640a988ec3737677867b8 2024-11-25T17:10:40,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554700781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554700784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,799 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#605 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:40,800 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/d50b44087162442f870b3bb80a768cf2 is 50, key is test_row_0/C:col10/1732554639436/Put/seqid=0 2024-11-25T17:10:40,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/1c57371690f744d4af54eb798c2c1106 is 50, key is test_row_0/C:col10/1732554640066/Put/seqid=0 2024-11-25T17:10:40,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742544_1720 (size=13391) 2024-11-25T17:10:40,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742545_1721 (size=12301) 2024-11-25T17:10:40,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=492 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/1c57371690f744d4af54eb798c2c1106 2024-11-25T17:10:40,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/c06c7a0472754d288b6de2df7cc387fe as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c06c7a0472754d288b6de2df7cc387fe 2024-11-25T17:10:40,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c06c7a0472754d288b6de2df7cc387fe, entries=150, sequenceid=492, filesize=30.5 K 2024-11-25T17:10:40,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/300e7e5223d640a988ec3737677867b8 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/300e7e5223d640a988ec3737677867b8 2024-11-25T17:10:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-25T17:10:40,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/300e7e5223d640a988ec3737677867b8, entries=150, sequenceid=492, filesize=12.0 K 2024-11-25T17:10:40,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/1c57371690f744d4af54eb798c2c1106 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1c57371690f744d4af54eb798c2c1106 2024-11-25T17:10:40,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1c57371690f744d4af54eb798c2c1106, entries=150, sequenceid=492, filesize=12.0 K 2024-11-25T17:10:40,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554700890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,894 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c2213e06f0c6c3750162aafa4b26c5ef in 178ms, sequenceid=492, compaction requested=false 2024-11-25T17:10:40,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:40,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:40,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-25T17:10:40,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:40,899 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:40,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:40,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:40,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125fb90f42ef88346979909b45815be6096_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554640898/Put/seqid=0 2024-11-25T17:10:40,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:40,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554700919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:40,925 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:40,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-25T17:10:40,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:40,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:40,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:40,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:40,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:40,935 DEBUG [Thread-2587 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7fe93b to 127.0.0.1:56265 2024-11-25T17:10:40,935 DEBUG [Thread-2587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:40,936 DEBUG [Thread-2585 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:56265 2024-11-25T17:10:40,936 DEBUG [Thread-2585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:40,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:40,938 DEBUG [Thread-2591 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:56265 2024-11-25T17:10:40,938 DEBUG [Thread-2591 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:40,938 DEBUG [Thread-2589 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:56265 2024-11-25T17:10:40,939 DEBUG [Thread-2589 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:40,940 DEBUG [Thread-2583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:56265 2024-11-25T17:10:40,940 DEBUG [Thread-2583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:40,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742546_1722 (size=14994) 2024-11-25T17:10:41,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:41,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554701022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:41,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-25T17:10:41,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:41,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-25T17:10:41,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:41,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
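The pid=180 entries above show the master repeatedly dispatching a FlushRegionCallable that fails with "Unable to complete flush ... as already flushing" and is retried. A flush of this kind can be requested from a client; a hedged sketch under the assumption that the request goes through the master-driven flush path seen in this log (table name taken from the log, everything else assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the cluster to flush all regions of the table; in this log the work is
            // dispatched by the master as FlushRegionCallable procedures to the region server.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

The "NOT flushing ... as already flushing" debug line explains the IOException: the remote callable refuses to start a second flush while the MemStoreFlusher is still writing the current one, so the master keeps re-sending the procedure until the in-flight flush completes.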
2024-11-25T17:10:41,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554701094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:41,136 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/200d176741aa427a95ec32599794f68e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/200d176741aa427a95ec32599794f68e 2024-11-25T17:10:41,144 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 200d176741aa427a95ec32599794f68e(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
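The rejected Mutate calls logged above (callId 261-265, all answered with RegionTooBusyException) are what the test's writer threads see while the region is blocked. A rough sketch of an application-level write loop that backs off on this condition; row, family, and retry policy are all illustrative, and in practice the HBase client may surface the exception wrapped rather than directly:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put); // the client library also retries internally before this throws
                    return;
                } catch (IOException e) {
                    boolean tooBusy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!tooBusy || attempt == 10) throw e;
                    Thread.sleep(100L * attempt); // simple linear backoff before trying again
                }
            }
        }
    }
}
```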
2024-11-25T17:10:41,144 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:41,144 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=12, startTime=1732554640708; duration=0sec 2024-11-25T17:10:41,145 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:41,145 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:41,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:41,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45194 deadline: 1732554701206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:41,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:41,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554701224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:41,229 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/d50b44087162442f870b3bb80a768cf2 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d50b44087162442f870b3bb80a768cf2 2024-11-25T17:10:41,233 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into d50b44087162442f870b3bb80a768cf2(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:41,233 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:41,233 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=12, startTime=1732554640708; duration=0sec 2024-11-25T17:10:41,233 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:41,233 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:41,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:41,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-25T17:10:41,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:41,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:41,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,354 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:41,357 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125fb90f42ef88346979909b45815be6096_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125fb90f42ef88346979909b45815be6096_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:41,358 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f4447d8efd5d492a84311a0445aa72ef, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:41,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f4447d8efd5d492a84311a0445aa72ef is 175, key is test_row_0/A:col10/1732554640898/Put/seqid=0 2024-11-25T17:10:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742547_1723 (size=39949) 2024-11-25T17:10:41,362 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=519, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f4447d8efd5d492a84311a0445aa72ef 2024-11-25T17:10:41,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/004e0cb17861426995fe0a576cd0d916 is 50, key is test_row_0/B:col10/1732554640898/Put/seqid=0 2024-11-25T17:10:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-25T17:10:41,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742548_1724 (size=12301) 2024-11-25T17:10:41,384 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/004e0cb17861426995fe0a576cd0d916 2024-11-25T17:10:41,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/2f1372b540b74991902ac565fec9de51 is 50, key is test_row_0/C:col10/1732554640898/Put/seqid=0 2024-11-25T17:10:41,397 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:41,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-25T17:10:41,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:41,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:41,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45274 deadline: 1732554701398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:41,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742549_1725 (size=12301) 2024-11-25T17:10:41,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-25T17:10:41,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:45208 deadline: 1732554701527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 2024-11-25T17:10:41,550 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:41,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-25T17:10:41,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:41,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
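A few entries back, the HMobStore "FLUSH Renaming flushed file ... mobdir ..." and DefaultMobStoreFlusher lines show column family A being flushed through the MOB write path, which is why its cells land under mobdir/ in addition to the normal store file. A hedged sketch of how a MOB-enabled family is typically declared; the threshold is illustrative and the test's actual schema is not visible in this log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamily {
    public static void main(String[] args) {
        // Cells larger than the MOB threshold are written to separate MOB files under mobdir/,
        // matching the rename into .../mobdir/data/... for family A seen above.
        ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100)   // illustrative threshold in bytes; not taken from the log
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(cfA)
            .build();
        System.out.println(td);
    }
}
```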
2024-11-25T17:10:41,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,703 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:41,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-25T17:10:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. as already flushing 2024-11-25T17:10:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T17:10:41,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T17:10:41,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=519 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/2f1372b540b74991902ac565fec9de51 2024-11-25T17:10:41,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/f4447d8efd5d492a84311a0445aa72ef as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f4447d8efd5d492a84311a0445aa72ef 2024-11-25T17:10:41,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f4447d8efd5d492a84311a0445aa72ef, entries=200, sequenceid=519, filesize=39.0 K 2024-11-25T17:10:41,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/004e0cb17861426995fe0a576cd0d916 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/004e0cb17861426995fe0a576cd0d916 2024-11-25T17:10:41,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/004e0cb17861426995fe0a576cd0d916, entries=150, sequenceid=519, filesize=12.0 K 2024-11-25T17:10:41,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/2f1372b540b74991902ac565fec9de51 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/2f1372b540b74991902ac565fec9de51 2024-11-25T17:10:41,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/2f1372b540b74991902ac565fec9de51, entries=150, sequenceid=519, filesize=12.0 K 2024-11-25T17:10:41,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c2213e06f0c6c3750162aafa4b26c5ef in 918ms, sequenceid=519, compaction requested=true 2024-11-25T17:10:41,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:41,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:A, priority=-2147483648, current under compaction store size is 1 2024-11-25T17:10:41,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:41,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:B, priority=-2147483648, current under compaction store size is 2 2024-11-25T17:10:41,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:41,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c2213e06f0c6c3750162aafa4b26c5ef:C, priority=-2147483648, current under compaction store size is 3 2024-11-25T17:10:41,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:41,815 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:41,815 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:41,816 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:41,816 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:41,816 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/B is initiating minor compaction (all files) 2024-11-25T17:10:41,816 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/A is initiating minor compaction (all files) 2024-11-25T17:10:41,816 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/B in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,816 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/A in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:41,816 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b4b0a2e4815541beb67345a81360a603, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c06c7a0472754d288b6de2df7cc387fe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f4447d8efd5d492a84311a0445aa72ef] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=101.1 K 2024-11-25T17:10:41,816 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/200d176741aa427a95ec32599794f68e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/300e7e5223d640a988ec3737677867b8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/004e0cb17861426995fe0a576cd0d916] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=37.1 K 2024-11-25T17:10:41,816 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,816 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
files: [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b4b0a2e4815541beb67345a81360a603, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c06c7a0472754d288b6de2df7cc387fe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f4447d8efd5d492a84311a0445aa72ef] 2024-11-25T17:10:41,817 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 200d176741aa427a95ec32599794f68e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732554639429 2024-11-25T17:10:41,817 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4b0a2e4815541beb67345a81360a603, keycount=150, bloomtype=ROW, size=31.6 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732554639429 2024-11-25T17:10:41,817 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 300e7e5223d640a988ec3737677867b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732554640066 2024-11-25T17:10:41,817 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting c06c7a0472754d288b6de2df7cc387fe, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732554640066 2024-11-25T17:10:41,817 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 004e0cb17861426995fe0a576cd0d916, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=519, earliestPutTs=1732554640782 2024-11-25T17:10:41,817 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4447d8efd5d492a84311a0445aa72ef, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=519, earliestPutTs=1732554640763 2024-11-25T17:10:41,823 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:41,823 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#B#compaction#610 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:41,824 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/7a75443aa4474083b189f5a5e9e8acad is 50, key is test_row_0/B:col10/1732554640898/Put/seqid=0 2024-11-25T17:10:41,824 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241125761b2053eaff47029c574fb0997e8ace_c2213e06f0c6c3750162aafa4b26c5ef store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:41,827 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241125761b2053eaff47029c574fb0997e8ace_c2213e06f0c6c3750162aafa4b26c5ef, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:41,827 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125761b2053eaff47029c574fb0997e8ace_c2213e06f0c6c3750162aafa4b26c5ef because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:41,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742551_1727 (size=4469) 2024-11-25T17:10:41,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742550_1726 (size=13493) 2024-11-25T17:10:41,844 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/7a75443aa4474083b189f5a5e9e8acad as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7a75443aa4474083b189f5a5e9e8acad 2024-11-25T17:10:41,849 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/B of c2213e06f0c6c3750162aafa4b26c5ef into 7a75443aa4474083b189f5a5e9e8acad(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:41,849 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:41,849 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/B, priority=13, startTime=1732554641815; duration=0sec 2024-11-25T17:10:41,849 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-25T17:10:41,849 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:B 2024-11-25T17:10:41,850 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T17:10:41,850 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T17:10:41,850 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1540): c2213e06f0c6c3750162aafa4b26c5ef/C is initiating minor compaction (all files) 2024-11-25T17:10:41,850 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c2213e06f0c6c3750162aafa4b26c5ef/C in TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,850 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d50b44087162442f870b3bb80a768cf2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1c57371690f744d4af54eb798c2c1106, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/2f1372b540b74991902ac565fec9de51] into tmpdir=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp, totalSize=37.1 K 2024-11-25T17:10:41,851 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting d50b44087162442f870b3bb80a768cf2, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732554639429 2024-11-25T17:10:41,851 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c57371690f744d4af54eb798c2c1106, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=492, earliestPutTs=1732554640066 2024-11-25T17:10:41,851 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f1372b540b74991902ac565fec9de51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=519, earliestPutTs=1732554640782 2024-11-25T17:10:41,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
6579369734b6,41865,1732554474464 2024-11-25T17:10:41,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41865 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-25T17:10:41,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:41,857 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-25T17:10:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:41,860 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#C#compaction#612 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:41,860 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/8bbf4d202a44472fbdb21757e280d388 is 50, key is test_row_0/C:col10/1732554640898/Put/seqid=0 2024-11-25T17:10:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125c4f05b87685e4742a9f28a732a9324bb_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554640906/Put/seqid=0 2024-11-25T17:10:41,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-25T17:10:41,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742552_1728 (size=13493) 2024-11-25T17:10:41,894 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/8bbf4d202a44472fbdb21757e280d388 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/8bbf4d202a44472fbdb21757e280d388 2024-11-25T17:10:41,898 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/C of c2213e06f0c6c3750162aafa4b26c5ef into 8bbf4d202a44472fbdb21757e280d388(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T17:10:41,898 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:41,898 INFO [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/C, priority=13, startTime=1732554641815; duration=0sec 2024-11-25T17:10:41,898 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:41,898 DEBUG [RS:0;6579369734b6:41865-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:C 2024-11-25T17:10:41,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41865 {}] regionserver.HRegion(8581): Flush requested on c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:41,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
as already flushing 2024-11-25T17:10:41,902 DEBUG [Thread-2572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cbfd84f to 127.0.0.1:56265 2024-11-25T17:10:41,902 DEBUG [Thread-2572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742553_1729 (size=12454) 2024-11-25T17:10:41,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:41,916 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125c4f05b87685e4742a9f28a732a9324bb_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125c4f05b87685e4742a9f28a732a9324bb_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:41,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/384b227d74ce4a2a98c60a601eebcfb1, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:41,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/384b227d74ce4a2a98c60a601eebcfb1 is 175, key is test_row_0/A:col10/1732554640906/Put/seqid=0 2024-11-25T17:10:41,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742554_1730 (size=31255) 2024-11-25T17:10:42,032 DEBUG [Thread-2576 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0644b7e6 to 127.0.0.1:56265 2024-11-25T17:10:42,032 DEBUG [Thread-2576 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:42,213 DEBUG [Thread-2574 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fb684eb to 127.0.0.1:56265 2024-11-25T17:10:42,213 DEBUG [Thread-2574 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:42,240 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c2213e06f0c6c3750162aafa4b26c5ef#A#compaction#611 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T17:10:42,241 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/9e8b9f5fbd6e4ae7836e5e1a5ad880ac is 175, key is test_row_0/A:col10/1732554640898/Put/seqid=0 2024-11-25T17:10:42,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742555_1731 (size=32447) 2024-11-25T17:10:42,325 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=532, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/384b227d74ce4a2a98c60a601eebcfb1 2024-11-25T17:10:42,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/bfc40640f8424e7daf48aa8480fd5ad5 is 50, key is test_row_0/B:col10/1732554640906/Put/seqid=0 2024-11-25T17:10:42,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742556_1732 (size=12301) 2024-11-25T17:10:42,653 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/9e8b9f5fbd6e4ae7836e5e1a5ad880ac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/9e8b9f5fbd6e4ae7836e5e1a5ad880ac 2024-11-25T17:10:42,658 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c2213e06f0c6c3750162aafa4b26c5ef/A of c2213e06f0c6c3750162aafa4b26c5ef into 9e8b9f5fbd6e4ae7836e5e1a5ad880ac(size=31.7 K), total size for store is 31.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T17:10:42,658 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:42,658 INFO [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef., storeName=c2213e06f0c6c3750162aafa4b26c5ef/A, priority=13, startTime=1732554641815; duration=0sec 2024-11-25T17:10:42,658 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T17:10:42,658 DEBUG [RS:0;6579369734b6:41865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c2213e06f0c6c3750162aafa4b26c5ef:A 2024-11-25T17:10:42,734 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/bfc40640f8424e7daf48aa8480fd5ad5 2024-11-25T17:10:42,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/0c2c33cc192c4df299c61f73de941473 is 50, key is test_row_0/C:col10/1732554640906/Put/seqid=0 2024-11-25T17:10:42,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742557_1733 (size=12301) 2024-11-25T17:10:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-25T17:10:43,149 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/0c2c33cc192c4df299c61f73de941473 2024-11-25T17:10:43,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/384b227d74ce4a2a98c60a601eebcfb1 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/384b227d74ce4a2a98c60a601eebcfb1 2024-11-25T17:10:43,166 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/384b227d74ce4a2a98c60a601eebcfb1, entries=150, sequenceid=532, filesize=30.5 K 2024-11-25T17:10:43,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 
{event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/bfc40640f8424e7daf48aa8480fd5ad5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/bfc40640f8424e7daf48aa8480fd5ad5 2024-11-25T17:10:43,177 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/bfc40640f8424e7daf48aa8480fd5ad5, entries=150, sequenceid=532, filesize=12.0 K 2024-11-25T17:10:43,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/0c2c33cc192c4df299c61f73de941473 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0c2c33cc192c4df299c61f73de941473 2024-11-25T17:10:43,189 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0c2c33cc192c4df299c61f73de941473, entries=150, sequenceid=532, filesize=12.0 K 2024-11-25T17:10:43,190 INFO [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=20.13 KB/20610 for c2213e06f0c6c3750162aafa4b26c5ef in 1334ms, sequenceid=532, compaction requested=false 2024-11-25T17:10:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/6579369734b6:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-25T17:10:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-25T17:10:43,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-25T17:10:43,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4190 sec 2024-11-25T17:10:43,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 2.4240 sec 2024-11-25T17:10:43,487 DEBUG [Thread-2578 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b5141 to 127.0.0.1:56265 2024-11-25T17:10:43,487 DEBUG [Thread-2578 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:44,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-25T17:10:44,878 INFO [Thread-2582 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-25T17:10:49,676 DEBUG [Thread-2580 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11a52cdf to 127.0.0.1:56265 2024-11-25T17:10:49,676 DEBUG [Thread-2580 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:49,676 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-25T17:10:49,676 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 105 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 102 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4008 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4089 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4103 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4039 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3974 2024-11-25T17:10:49,677 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-25T17:10:49,677 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-25T17:10:49,677 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x635b1751 to 127.0.0.1:56265 2024-11-25T17:10:49,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:49,677 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-25T17:10:49,678 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-25T17:10:49,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-25T17:10:49,680 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554649680"}]},"ts":"1732554649680"} 2024-11-25T17:10:49,681 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-25T17:10:49,683 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-25T17:10:49,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-25T17:10:49,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, UNASSIGN}] 2024-11-25T17:10:49,685 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=183, ppid=182, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, UNASSIGN 2024-11-25T17:10:49,686 INFO [PEWorker-2 
{}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=CLOSING, regionLocation=6579369734b6,41865,1732554474464 2024-11-25T17:10:49,686 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-25T17:10:49,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; CloseRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464}] 2024-11-25T17:10:49,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-25T17:10:49,837 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6579369734b6,41865,1732554474464 2024-11-25T17:10:49,838 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(124): Close c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:49,838 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-25T17:10:49,838 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1681): Closing c2213e06f0c6c3750162aafa4b26c5ef, disabling compactions & flushes 2024-11-25T17:10:49,838 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:49,838 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. after waiting 0 ms 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:49,839 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(2837): Flushing c2213e06f0c6c3750162aafa4b26c5ef 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=A 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=B 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c2213e06f0c6c3750162aafa4b26c5ef, store=C 2024-11-25T17:10:49,839 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-25T17:10:49,857 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125378432cd789a49cca5db8c92ce0263e2_c2213e06f0c6c3750162aafa4b26c5ef is 50, key is test_row_0/A:col10/1732554642031/Put/seqid=0 2024-11-25T17:10:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742558_1734 (size=12454) 2024-11-25T17:10:49,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-25T17:10:50,271 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T17:10:50,275 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241125378432cd789a49cca5db8c92ce0263e2_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125378432cd789a49cca5db8c92ce0263e2_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:50,275 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cb2782e8a7d3411b8d42e5ed9d03c5f9, store: [table=TestAcidGuarantees family=A region=c2213e06f0c6c3750162aafa4b26c5ef] 2024-11-25T17:10:50,276 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cb2782e8a7d3411b8d42e5ed9d03c5f9 is 175, key is test_row_0/A:col10/1732554642031/Put/seqid=0 2024-11-25T17:10:50,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742559_1735 (size=31255) 2024-11-25T17:10:50,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-25T17:10:50,679 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=542, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cb2782e8a7d3411b8d42e5ed9d03c5f9 2024-11-25T17:10:50,686 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/043c094f3d4c46829674fc9f075693cc is 50, key is test_row_0/B:col10/1732554642031/Put/seqid=0 2024-11-25T17:10:50,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742560_1736 (size=12301) 2024-11-25T17:10:50,695 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=542 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/043c094f3d4c46829674fc9f075693cc 2024-11-25T17:10:50,701 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/0f34cab3161a486088780f0c3bf7769e is 50, key is test_row_0/C:col10/1732554642031/Put/seqid=0 2024-11-25T17:10:50,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742561_1737 (size=12301) 2024-11-25T17:10:50,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-25T17:10:51,118 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=542 (bloomFilter=true), 
to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/0f34cab3161a486088780f0c3bf7769e 2024-11-25T17:10:51,121 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/A/cb2782e8a7d3411b8d42e5ed9d03c5f9 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cb2782e8a7d3411b8d42e5ed9d03c5f9 2024-11-25T17:10:51,123 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cb2782e8a7d3411b8d42e5ed9d03c5f9, entries=150, sequenceid=542, filesize=30.5 K 2024-11-25T17:10:51,124 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/B/043c094f3d4c46829674fc9f075693cc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/043c094f3d4c46829674fc9f075693cc 2024-11-25T17:10:51,126 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/043c094f3d4c46829674fc9f075693cc, entries=150, sequenceid=542, filesize=12.0 K 2024-11-25T17:10:51,127 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/.tmp/C/0f34cab3161a486088780f0c3bf7769e as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0f34cab3161a486088780f0c3bf7769e 2024-11-25T17:10:51,129 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0f34cab3161a486088780f0c3bf7769e, entries=150, sequenceid=542, filesize=12.0 K 2024-11-25T17:10:51,130 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for c2213e06f0c6c3750162aafa4b26c5ef in 1290ms, sequenceid=542, compaction requested=true 2024-11-25T17:10:51,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/88c6522200574c3ab000210c50a32a31, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/760ccaa8803c44d8bf1766c789d5176b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/899710e7c02f4f5b84698ea575ecfbcc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6fc048c2aa9f448a99d738337149affe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/21ef1dc9c7ff4d34a2dc0f30d6345e38, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/e9b710cee89e485f85a33f11c37059b2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/390dfcd3f4274275a8e8c497e57c7f49, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cbab9ee9e0644ff2b34074926a361e35, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fed3e7e7231c4b88adf948bff3044584, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6d8653ccc3a740d78e883b69eec2a4e9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/ee560ecb3b244b2d8db60683b9393a62, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/de6b7b29b7a342f8aee5a04e0068d1fc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/69578324487048128b6df6621b233868, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7e05393a6565481db29992797f4b5d6f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4e0c7fbe40e94cb29620d3db52d680e1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fd2fa8b58bdf4777ba853abe3ff269be, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a38f5626aa914424a2a547d1a7fd70f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/35496487940d4ef6a2a4be9dc95420fe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/42e9bcb6ec5b4a37b0753c545164f519, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c05db277e4c0418fa597315c4397f791, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f5158eb2fe9a450bba3214d0c594d222, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3628d08c58ac47658cdb3e1d7cfa318e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3193e065328a408eaacf40f2d5ce14ac, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4f4c8f14ad0e4447aa622a3310be02ee, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/639884beda844163926c583adfb7a145, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a2eaaac2005845f0acb9d4c9c432f3fa, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3f3b2d91ab8c4b2eb852c1ba65b5d407, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/d8395edac19f4c75b1e2eb389b8306fb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b945217cccfb4c04bac1b7a7b2e1007d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6e9945cc8adc40f9a79a3b90cefd366f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7b5a6250ad8144549880ee93f40e2818, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a115349417854f39a6cbbd4796b70a49, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b4b0a2e4815541beb67345a81360a603, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c06c7a0472754d288b6de2df7cc387fe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f4447d8efd5d492a84311a0445aa72ef] to archive 2024-11-25T17:10:51,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-25T17:10:51,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/88c6522200574c3ab000210c50a32a31 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/88c6522200574c3ab000210c50a32a31 2024-11-25T17:10:51,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/760ccaa8803c44d8bf1766c789d5176b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/760ccaa8803c44d8bf1766c789d5176b 2024-11-25T17:10:51,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/899710e7c02f4f5b84698ea575ecfbcc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/899710e7c02f4f5b84698ea575ecfbcc 2024-11-25T17:10:51,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6fc048c2aa9f448a99d738337149affe to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6fc048c2aa9f448a99d738337149affe 2024-11-25T17:10:51,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/21ef1dc9c7ff4d34a2dc0f30d6345e38 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/21ef1dc9c7ff4d34a2dc0f30d6345e38 2024-11-25T17:10:51,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/e9b710cee89e485f85a33f11c37059b2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/e9b710cee89e485f85a33f11c37059b2 2024-11-25T17:10:51,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/390dfcd3f4274275a8e8c497e57c7f49 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/390dfcd3f4274275a8e8c497e57c7f49 2024-11-25T17:10:51,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cbab9ee9e0644ff2b34074926a361e35 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cbab9ee9e0644ff2b34074926a361e35 2024-11-25T17:10:51,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fed3e7e7231c4b88adf948bff3044584 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fed3e7e7231c4b88adf948bff3044584 2024-11-25T17:10:51,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6d8653ccc3a740d78e883b69eec2a4e9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6d8653ccc3a740d78e883b69eec2a4e9 2024-11-25T17:10:51,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/ee560ecb3b244b2d8db60683b9393a62 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/ee560ecb3b244b2d8db60683b9393a62 2024-11-25T17:10:51,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/de6b7b29b7a342f8aee5a04e0068d1fc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/de6b7b29b7a342f8aee5a04e0068d1fc 2024-11-25T17:10:51,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/69578324487048128b6df6621b233868 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/69578324487048128b6df6621b233868 2024-11-25T17:10:51,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7e05393a6565481db29992797f4b5d6f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7e05393a6565481db29992797f4b5d6f 2024-11-25T17:10:51,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4e0c7fbe40e94cb29620d3db52d680e1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4e0c7fbe40e94cb29620d3db52d680e1 2024-11-25T17:10:51,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fd2fa8b58bdf4777ba853abe3ff269be to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/fd2fa8b58bdf4777ba853abe3ff269be 2024-11-25T17:10:51,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a38f5626aa914424a2a547d1a7fd70f8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a38f5626aa914424a2a547d1a7fd70f8 2024-11-25T17:10:51,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/35496487940d4ef6a2a4be9dc95420fe to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/35496487940d4ef6a2a4be9dc95420fe 2024-11-25T17:10:51,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/42e9bcb6ec5b4a37b0753c545164f519 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/42e9bcb6ec5b4a37b0753c545164f519 2024-11-25T17:10:51,146 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c05db277e4c0418fa597315c4397f791 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c05db277e4c0418fa597315c4397f791 2024-11-25T17:10:51,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f5158eb2fe9a450bba3214d0c594d222 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f5158eb2fe9a450bba3214d0c594d222 2024-11-25T17:10:51,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3628d08c58ac47658cdb3e1d7cfa318e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3628d08c58ac47658cdb3e1d7cfa318e 2024-11-25T17:10:51,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3193e065328a408eaacf40f2d5ce14ac to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3193e065328a408eaacf40f2d5ce14ac 2024-11-25T17:10:51,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4f4c8f14ad0e4447aa622a3310be02ee to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/4f4c8f14ad0e4447aa622a3310be02ee 2024-11-25T17:10:51,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/639884beda844163926c583adfb7a145 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/639884beda844163926c583adfb7a145 2024-11-25T17:10:51,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a2eaaac2005845f0acb9d4c9c432f3fa to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a2eaaac2005845f0acb9d4c9c432f3fa 2024-11-25T17:10:51,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3f3b2d91ab8c4b2eb852c1ba65b5d407 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/3f3b2d91ab8c4b2eb852c1ba65b5d407 2024-11-25T17:10:51,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/d8395edac19f4c75b1e2eb389b8306fb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/d8395edac19f4c75b1e2eb389b8306fb 2024-11-25T17:10:51,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b945217cccfb4c04bac1b7a7b2e1007d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b945217cccfb4c04bac1b7a7b2e1007d 2024-11-25T17:10:51,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6e9945cc8adc40f9a79a3b90cefd366f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/6e9945cc8adc40f9a79a3b90cefd366f 2024-11-25T17:10:51,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7b5a6250ad8144549880ee93f40e2818 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/7b5a6250ad8144549880ee93f40e2818 2024-11-25T17:10:51,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a115349417854f39a6cbbd4796b70a49 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/a115349417854f39a6cbbd4796b70a49 2024-11-25T17:10:51,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b4b0a2e4815541beb67345a81360a603 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/b4b0a2e4815541beb67345a81360a603 2024-11-25T17:10:51,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c06c7a0472754d288b6de2df7cc387fe to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/c06c7a0472754d288b6de2df7cc387fe 2024-11-25T17:10:51,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f4447d8efd5d492a84311a0445aa72ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/f4447d8efd5d492a84311a0445aa72ef 2024-11-25T17:10:51,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/f3a91a9458fa49a687dbf99a961414b0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1137cd2eb60a44e3ac663cd5b5781bbe, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/c8f915e3df82409097f70090978286f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a0f103b3dc3943b09d3ffb150165acde, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/400043c1e38a4d049e206c087a1c8ee8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/67b6c69edf5f4dab909213e70de29f84, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3640a04b1f664cbd9105775848eae9e6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/935c3da8140941ffa88f503c27e397d9, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/63933b197e424950b6e667879e3ccbd7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6511dba193d34220ad9d6f1b27bf952e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/0c1a67d2919d443890182b5093d8c68f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1376ad1d2db34770a7f9f66ce6790081, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/38af93d56bb941a0a3235705f14b1244, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/54b8c2fd1fc3464c917cd8ec356252ce, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/13de382ece3b42bcbaaf4830f18fce60, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3b6ac6520cce450d811965be0c2b748f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/467568364aa64c399e105de36d6f1993, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/da472b75c7dc4e9f9ad74cca24fadfe8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ef0ac4b22aca4ee2a94f4315f2a801a2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ae69b247400c49b7bee317db20d62dec, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7690dbeb912042e5b3b8a55374bba499, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/43a92d559cb34602a2e33ce71c0f0a25, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b29b52d37e4b4632b1fd6cef1d8c6ffb, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a7d45af9308d4bba8d159f847a3c1bdc, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a4ab0d0475f34e12848d89e5e1ee674c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b0b4eec7bf9f402caa56b502b37c809a, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6ed1a116f40746bf9eff7840e49cb4d4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/592ab8ece1b14cc882641ddb4a6481de, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3019da99338f4bd790dd674752685cfd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/06ac006704844de3ae459f6d8b659a83, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/200d176741aa427a95ec32599794f68e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1068d45459dc4f339f14e3284b199371, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/300e7e5223d640a988ec3737677867b8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/004e0cb17861426995fe0a576cd0d916] to archive 2024-11-25T17:10:51,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T17:10:51,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/f3a91a9458fa49a687dbf99a961414b0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/f3a91a9458fa49a687dbf99a961414b0 2024-11-25T17:10:51,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1137cd2eb60a44e3ac663cd5b5781bbe to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1137cd2eb60a44e3ac663cd5b5781bbe 2024-11-25T17:10:51,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/c8f915e3df82409097f70090978286f8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/c8f915e3df82409097f70090978286f8 2024-11-25T17:10:51,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a0f103b3dc3943b09d3ffb150165acde to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a0f103b3dc3943b09d3ffb150165acde 2024-11-25T17:10:51,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/400043c1e38a4d049e206c087a1c8ee8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/400043c1e38a4d049e206c087a1c8ee8 2024-11-25T17:10:51,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/67b6c69edf5f4dab909213e70de29f84 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/67b6c69edf5f4dab909213e70de29f84 2024-11-25T17:10:51,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3640a04b1f664cbd9105775848eae9e6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3640a04b1f664cbd9105775848eae9e6 2024-11-25T17:10:51,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/935c3da8140941ffa88f503c27e397d9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/935c3da8140941ffa88f503c27e397d9 2024-11-25T17:10:51,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/63933b197e424950b6e667879e3ccbd7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/63933b197e424950b6e667879e3ccbd7 2024-11-25T17:10:51,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6511dba193d34220ad9d6f1b27bf952e to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6511dba193d34220ad9d6f1b27bf952e 2024-11-25T17:10:51,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/0c1a67d2919d443890182b5093d8c68f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/0c1a67d2919d443890182b5093d8c68f 2024-11-25T17:10:51,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1376ad1d2db34770a7f9f66ce6790081 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1376ad1d2db34770a7f9f66ce6790081 2024-11-25T17:10:51,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/d11a8fe6001c46fa9ab3f8fb3f7e81e3 2024-11-25T17:10:51,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/38af93d56bb941a0a3235705f14b1244 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/38af93d56bb941a0a3235705f14b1244 2024-11-25T17:10:51,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/54b8c2fd1fc3464c917cd8ec356252ce to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/54b8c2fd1fc3464c917cd8ec356252ce 2024-11-25T17:10:51,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/13de382ece3b42bcbaaf4830f18fce60 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/13de382ece3b42bcbaaf4830f18fce60 2024-11-25T17:10:51,173 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3b6ac6520cce450d811965be0c2b748f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3b6ac6520cce450d811965be0c2b748f 2024-11-25T17:10:51,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/467568364aa64c399e105de36d6f1993 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/467568364aa64c399e105de36d6f1993 2024-11-25T17:10:51,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/da472b75c7dc4e9f9ad74cca24fadfe8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/da472b75c7dc4e9f9ad74cca24fadfe8 2024-11-25T17:10:51,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ef0ac4b22aca4ee2a94f4315f2a801a2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ef0ac4b22aca4ee2a94f4315f2a801a2 2024-11-25T17:10:51,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ae69b247400c49b7bee317db20d62dec to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/ae69b247400c49b7bee317db20d62dec 2024-11-25T17:10:51,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7690dbeb912042e5b3b8a55374bba499 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7690dbeb912042e5b3b8a55374bba499 2024-11-25T17:10:51,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/43a92d559cb34602a2e33ce71c0f0a25 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/43a92d559cb34602a2e33ce71c0f0a25 2024-11-25T17:10:51,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b29b52d37e4b4632b1fd6cef1d8c6ffb to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b29b52d37e4b4632b1fd6cef1d8c6ffb 2024-11-25T17:10:51,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a7d45af9308d4bba8d159f847a3c1bdc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a7d45af9308d4bba8d159f847a3c1bdc 2024-11-25T17:10:51,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a4ab0d0475f34e12848d89e5e1ee674c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/a4ab0d0475f34e12848d89e5e1ee674c 2024-11-25T17:10:51,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b0b4eec7bf9f402caa56b502b37c809a to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/b0b4eec7bf9f402caa56b502b37c809a 2024-11-25T17:10:51,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6ed1a116f40746bf9eff7840e49cb4d4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/6ed1a116f40746bf9eff7840e49cb4d4 2024-11-25T17:10:51,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/592ab8ece1b14cc882641ddb4a6481de to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/592ab8ece1b14cc882641ddb4a6481de 2024-11-25T17:10:51,185 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3019da99338f4bd790dd674752685cfd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/3019da99338f4bd790dd674752685cfd 2024-11-25T17:10:51,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/06ac006704844de3ae459f6d8b659a83 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/06ac006704844de3ae459f6d8b659a83 2024-11-25T17:10:51,187 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/200d176741aa427a95ec32599794f68e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/200d176741aa427a95ec32599794f68e 2024-11-25T17:10:51,187 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1068d45459dc4f339f14e3284b199371 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/1068d45459dc4f339f14e3284b199371 2024-11-25T17:10:51,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/300e7e5223d640a988ec3737677867b8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/300e7e5223d640a988ec3737677867b8 2024-11-25T17:10:51,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/004e0cb17861426995fe0a576cd0d916 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/004e0cb17861426995fe0a576cd0d916 2024-11-25T17:10:51,190 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/49064be13a0d456b8ec8a9faf2852ed4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1168dde13ef84c4483afd8412e73540f, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de598d7bd63144a4bea1af21ec630598, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f243ce3cd54949d9b82b092a2867a260, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/67e6450c24b54b768071bb2c41e956e7, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f16ff0241e5c4850952d0b15791ae080, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe04dd247e3846f4bb017de4d3e240f8, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b40902f6b4e544f9b1d00051215861d6, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d55260ad24314b62a001f88a82aff9f2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/32939c68654d42678da5876191077ae0, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/9d46fff8504d43878f07e64975063372, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/db2278dd928e4945a0bada9ae33bdb7c, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/dddb377888e34243ad1c638456a1baed, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055ce72fb4eb403a9317df8d79fc49a1, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a043eb7534ef416dab08878287c45229, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b0be6a84090348c78152c616cc862f92, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a92ef0df6b1342b3920e61857804f265, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/45def14e4d034a6f8042d57519a23bc6, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/eeedc778609146a7b0440f26b7785aa2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b04ff3e5c20242609c5a34d71555b0d4, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b3a13625e09c4926a63f5a66bf5e18fd, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe97a8d3166643ff9a4b92a56b1b0533, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/71dfedee29e94ee68c875a68c3125c90, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e0f56b019aa649c79e1698e5e7200504, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f5c1a59980d347aeae0e35724e3ec299, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/530898b084f0490c91d2f5260fbb6b34, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055e6cad7656445cabacd176de97ce8e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/5045fe9245b3402396e48371e56cb47e, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/525ceba6db184a1ab502bdeb9a3aa26b, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e2ee196a3add4dfe9970e1a7c1f3c43d, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de144886405a43e69110bde0e92d5be2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d50b44087162442f870b3bb80a768cf2, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/91adc89c79094fcbb4d322fb86edd6f9, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1c57371690f744d4af54eb798c2c1106, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/2f1372b540b74991902ac565fec9de51] to archive 2024-11-25T17:10:51,191 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
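The same three-step pattern repeats here for column family C: regionserver.HStore(2316) lists the compacted files, backup.HFileArchiver(360) announces the archive pass, and one HFileArchiver(596) record follows per file. The sketch below, which assumes only the record format seen in this log, tallies those per-file records by column family (the path segment after the region encoded name, e.g. A, B or C); the class name and regex are hypothetical, for illustration only.

// Illustrative sketch only: counts "Archived from FileableStoreFile, <src> to <dst>"
// records per column family, handling multiple records joined on one physical line.
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ArchiveTallySketch {
    private static final Pattern ARCHIVED =
        Pattern.compile("Archived from FileableStoreFile, (\\S+) to (\\S+)");

    public static Map<String, Integer> tally(Iterable<String> logLines) {
        Map<String, Integer> perFamily = new LinkedHashMap<>();
        for (String line : logLines) {
            Matcher m = ARCHIVED.matcher(line);
            while (m.find()) {
                String src = m.group(1);
                String[] parts = src.split("/");
                // .../<table>/<region>/<family>/<hfile> -> family is the second-to-last segment.
                String family = parts[parts.length - 2];
                perFamily.merge(family, 1, Integer::sum);
            }
        }
        return perFamily;
    }
}

Fed the lines of this section, the tally would simply equal the number of "Archived from" records emitted for each of the A, B and C stores during this region close.
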
2024-11-25T17:10:51,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/49064be13a0d456b8ec8a9faf2852ed4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/49064be13a0d456b8ec8a9faf2852ed4 2024-11-25T17:10:51,193 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1168dde13ef84c4483afd8412e73540f to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1168dde13ef84c4483afd8412e73540f 2024-11-25T17:10:51,194 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de598d7bd63144a4bea1af21ec630598 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de598d7bd63144a4bea1af21ec630598 2024-11-25T17:10:51,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f243ce3cd54949d9b82b092a2867a260 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f243ce3cd54949d9b82b092a2867a260 2024-11-25T17:10:51,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/67e6450c24b54b768071bb2c41e956e7 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/67e6450c24b54b768071bb2c41e956e7 2024-11-25T17:10:51,196 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f16ff0241e5c4850952d0b15791ae080 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f16ff0241e5c4850952d0b15791ae080 2024-11-25T17:10:51,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe04dd247e3846f4bb017de4d3e240f8 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe04dd247e3846f4bb017de4d3e240f8 2024-11-25T17:10:51,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b40902f6b4e544f9b1d00051215861d6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b40902f6b4e544f9b1d00051215861d6 2024-11-25T17:10:51,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d55260ad24314b62a001f88a82aff9f2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d55260ad24314b62a001f88a82aff9f2 2024-11-25T17:10:51,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/32939c68654d42678da5876191077ae0 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/32939c68654d42678da5876191077ae0 2024-11-25T17:10:51,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/9d46fff8504d43878f07e64975063372 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/9d46fff8504d43878f07e64975063372 2024-11-25T17:10:51,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/db2278dd928e4945a0bada9ae33bdb7c to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/db2278dd928e4945a0bada9ae33bdb7c 2024-11-25T17:10:51,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/dddb377888e34243ad1c638456a1baed to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/dddb377888e34243ad1c638456a1baed 2024-11-25T17:10:51,202 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055ce72fb4eb403a9317df8d79fc49a1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055ce72fb4eb403a9317df8d79fc49a1 2024-11-25T17:10:51,202 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a043eb7534ef416dab08878287c45229 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a043eb7534ef416dab08878287c45229 2024-11-25T17:10:51,203 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b0be6a84090348c78152c616cc862f92 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b0be6a84090348c78152c616cc862f92 2024-11-25T17:10:51,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a92ef0df6b1342b3920e61857804f265 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/a92ef0df6b1342b3920e61857804f265 2024-11-25T17:10:51,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/45def14e4d034a6f8042d57519a23bc6 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/45def14e4d034a6f8042d57519a23bc6 2024-11-25T17:10:51,205 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/eeedc778609146a7b0440f26b7785aa2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/eeedc778609146a7b0440f26b7785aa2 2024-11-25T17:10:51,207 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b04ff3e5c20242609c5a34d71555b0d4 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b04ff3e5c20242609c5a34d71555b0d4 2024-11-25T17:10:51,208 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b3a13625e09c4926a63f5a66bf5e18fd to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/b3a13625e09c4926a63f5a66bf5e18fd 2024-11-25T17:10:51,209 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe97a8d3166643ff9a4b92a56b1b0533 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/fe97a8d3166643ff9a4b92a56b1b0533 2024-11-25T17:10:51,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/71dfedee29e94ee68c875a68c3125c90 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/71dfedee29e94ee68c875a68c3125c90 2024-11-25T17:10:51,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e0f56b019aa649c79e1698e5e7200504 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e0f56b019aa649c79e1698e5e7200504 2024-11-25T17:10:51,212 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f5c1a59980d347aeae0e35724e3ec299 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/f5c1a59980d347aeae0e35724e3ec299 2024-11-25T17:10:51,213 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/530898b084f0490c91d2f5260fbb6b34 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/530898b084f0490c91d2f5260fbb6b34 2024-11-25T17:10:51,214 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055e6cad7656445cabacd176de97ce8e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/055e6cad7656445cabacd176de97ce8e 2024-11-25T17:10:51,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/5045fe9245b3402396e48371e56cb47e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/5045fe9245b3402396e48371e56cb47e 2024-11-25T17:10:51,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/525ceba6db184a1ab502bdeb9a3aa26b to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/525ceba6db184a1ab502bdeb9a3aa26b 2024-11-25T17:10:51,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e2ee196a3add4dfe9970e1a7c1f3c43d to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/e2ee196a3add4dfe9970e1a7c1f3c43d 2024-11-25T17:10:51,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de144886405a43e69110bde0e92d5be2 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/de144886405a43e69110bde0e92d5be2 2024-11-25T17:10:51,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d50b44087162442f870b3bb80a768cf2 to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/d50b44087162442f870b3bb80a768cf2 2024-11-25T17:10:51,219 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/91adc89c79094fcbb4d322fb86edd6f9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/91adc89c79094fcbb4d322fb86edd6f9 2024-11-25T17:10:51,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1c57371690f744d4af54eb798c2c1106 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/1c57371690f744d4af54eb798c2c1106 2024-11-25T17:10:51,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/2f1372b540b74991902ac565fec9de51 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/2f1372b540b74991902ac565fec9de51 2024-11-25T17:10:51,224 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/recovered.edits/545.seqid, newMaxSeqId=545, maxSeqId=4 2024-11-25T17:10:51,224 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef. 
2024-11-25T17:10:51,224 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] regionserver.HRegion(1635): Region close journal for c2213e06f0c6c3750162aafa4b26c5ef: 2024-11-25T17:10:51,226 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION, pid=184}] handler.UnassignRegionHandler(170): Closed c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,226 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=183 updating hbase:meta row=c2213e06f0c6c3750162aafa4b26c5ef, regionState=CLOSED 2024-11-25T17:10:51,228 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-25T17:10:51,228 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; CloseRegionProcedure c2213e06f0c6c3750162aafa4b26c5ef, server=6579369734b6,41865,1732554474464 in 1.5410 sec 2024-11-25T17:10:51,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-11-25T17:10:51,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c2213e06f0c6c3750162aafa4b26c5ef, UNASSIGN in 1.5430 sec 2024-11-25T17:10:51,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-25T17:10:51,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5450 sec 2024-11-25T17:10:51,230 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732554651230"}]},"ts":"1732554651230"} 2024-11-25T17:10:51,231 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-25T17:10:51,233 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-25T17:10:51,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5560 sec 2024-11-25T17:10:51,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-25T17:10:51,788 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-25T17:10:51,789 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-25T17:10:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:51,790 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=185, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:51,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-25T17:10:51,791 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=185, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:51,798 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,800 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C, FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/recovered.edits] 2024-11-25T17:10:51,803 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/384b227d74ce4a2a98c60a601eebcfb1 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/384b227d74ce4a2a98c60a601eebcfb1 2024-11-25T17:10:51,804 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/9e8b9f5fbd6e4ae7836e5e1a5ad880ac to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/9e8b9f5fbd6e4ae7836e5e1a5ad880ac 2024-11-25T17:10:51,805 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cb2782e8a7d3411b8d42e5ed9d03c5f9 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/A/cb2782e8a7d3411b8d42e5ed9d03c5f9 2024-11-25T17:10:51,807 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/043c094f3d4c46829674fc9f075693cc to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/043c094f3d4c46829674fc9f075693cc 2024-11-25T17:10:51,808 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7a75443aa4474083b189f5a5e9e8acad to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/7a75443aa4474083b189f5a5e9e8acad 
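At this point the log shows the client-side sequence the test driver issued: the disable of default:TestAcidGuarantees has completed (procId 181) and the delete (pid=185) has begun clearing the filesystem layout. A minimal sketch of the equivalent client operations through the public HBase Admin API follows, assuming a reachable cluster whose configuration is on the classpath; the class name is a placeholder, the table name is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                if (admin.tableExists(table)) {
                    if (admin.isTableEnabled(table)) {
                        admin.disableTable(table); // server side runs a DisableTableProcedure, as with pid=181 above
                    }
                    admin.deleteTable(table);      // server side runs a DeleteTableProcedure, as with pid=185 above
                }
            }
        }
    }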
2024-11-25T17:10:51,809 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/bfc40640f8424e7daf48aa8480fd5ad5 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/B/bfc40640f8424e7daf48aa8480fd5ad5 2024-11-25T17:10:51,811 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0c2c33cc192c4df299c61f73de941473 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0c2c33cc192c4df299c61f73de941473 2024-11-25T17:10:51,812 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0f34cab3161a486088780f0c3bf7769e to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/0f34cab3161a486088780f0c3bf7769e 2024-11-25T17:10:51,813 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/8bbf4d202a44472fbdb21757e280d388 to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/C/8bbf4d202a44472fbdb21757e280d388 2024-11-25T17:10:51,815 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/recovered.edits/545.seqid to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef/recovered.edits/545.seqid 2024-11-25T17:10:51,816 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/default/TestAcidGuarantees/c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,816 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-25T17:10:51,816 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-25T17:10:51,817 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-25T17:10:51,820 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125047328a8003a436484490029adb7c8e4_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125047328a8003a436484490029adb7c8e4_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,821 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411250c3ebda1c6554cefbb04e863c09cd1d4_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411250c3ebda1c6554cefbb04e863c09cd1d4_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,822 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125199e09def6c341ae91da8679e9f842b1_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125199e09def6c341ae91da8679e9f842b1_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,823 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411251c94b731ac9f42bea1a862016291f690_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411251c94b731ac9f42bea1a862016291f690_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,824 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125273e16518f204794b4e638fe0a11130a_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125273e16518f204794b4e638fe0a11130a_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,825 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411252e89429ecad0438bbf4a546cf2dfb99e_c2213e06f0c6c3750162aafa4b26c5ef to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411252e89429ecad0438bbf4a546cf2dfb99e_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,826 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125378432cd789a49cca5db8c92ce0263e2_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125378432cd789a49cca5db8c92ce0263e2_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,827 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411254ef1432f3d284769b28ce1a0eb49f538_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411254ef1432f3d284769b28ce1a0eb49f538_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,828 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112567f9f596746f4e5b957e8599d5e29ff4_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112567f9f596746f4e5b957e8599d5e29ff4_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,829 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411256f83b24c4b3a4c78a40d1d4ab183e3de_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411256f83b24c4b3a4c78a40d1d4ab183e3de_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,831 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112574912ec812fc42308bc0543afb3e89e5_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112574912ec812fc42308bc0543afb3e89e5_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,832 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125798bacfe3ba140e085b4838598ecb00f_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125798bacfe3ba140e085b4838598ecb00f_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,832 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125844f3251d6cb47a08e6dae9b81526bff_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125844f3251d6cb47a08e6dae9b81526bff_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,833 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258b98738dbffe4f65aaa77855989b41f7_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258b98738dbffe4f65aaa77855989b41f7_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,834 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258de6b788b1af4d938cd0198f63ecaf5d_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411258de6b788b1af4d938cd0198f63ecaf5d_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,836 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a42028c1d9db4a599fca2222ce9b0aa3_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125a42028c1d9db4a599fca2222ce9b0aa3_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,837 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b39ab163336848d697edbf4fdbd1d5a5_c2213e06f0c6c3750162aafa4b26c5ef to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b39ab163336848d697edbf4fdbd1d5a5_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,838 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b817011f47e244d98ad7cd13ae5c1f95_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125b817011f47e244d98ad7cd13ae5c1f95_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,839 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125c4f05b87685e4742a9f28a732a9324bb_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125c4f05b87685e4742a9f28a732a9324bb_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,840 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ca4b0cba992044bab0690060be8278a5_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ca4b0cba992044bab0690060be8278a5_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,841 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125d9b407f607474346a6cd9064b3738519_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125d9b407f607474346a6cd9064b3738519_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,842 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125da2dc5f935ea49398e054b31ac00f211_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125da2dc5f935ea49398e054b31ac00f211_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,843 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e3aab389a0a54c879d2bd0d03085e9be_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125e3aab389a0a54c879d2bd0d03085e9be_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,844 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ebc0d81856524bb5939e7b2e309281fd_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125ebc0d81856524bb5939e7b2e309281fd_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,845 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f5994023b51b42268f7b180e6de26642_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f5994023b51b42268f7b180e6de26642_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,846 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f9ccc7622db54b7b85ea019807909624_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125f9ccc7622db54b7b85ea019807909624_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,847 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125fb90f42ef88346979909b45815be6096_c2213e06f0c6c3750162aafa4b26c5ef to hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125fb90f42ef88346979909b45815be6096_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,848 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125fc9e05c6fc1d4830a010626f87fa1ba2_c2213e06f0c6c3750162aafa4b26c5ef to 
hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241125fc9e05c6fc1d4830a010626f87fa1ba2_c2213e06f0c6c3750162aafa4b26c5ef 2024-11-25T17:10:51,849 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-25T17:10:51,851 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=185, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:51,852 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-25T17:10:51,854 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-25T17:10:51,855 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=185, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:51,855 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-25T17:10:51,855 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732554651855"}]},"ts":"9223372036854775807"} 2024-11-25T17:10:51,857 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-25T17:10:51,857 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c2213e06f0c6c3750162aafa4b26c5ef, NAME => 'TestAcidGuarantees,,1732554617410.c2213e06f0c6c3750162aafa4b26c5ef.', STARTKEY => '', ENDKEY => ''}] 2024-11-25T17:10:51,857 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-25T17:10:51,857 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732554651857"}]},"ts":"9223372036854775807"} 2024-11-25T17:10:51,858 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-25T17:10:51,861 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=185, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-25T17:10:51,861 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 72 msec 2024-11-25T17:10:51,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33083 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-25T17:10:51,892 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-11-25T17:10:51,903 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=237 (was 241), OpenFileDescriptor=449 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=837 (was 856), ProcessCount=11 (was 11), AvailableMemoryMB=1947 (was 1481) - AvailableMemoryMB LEAK? - 2024-11-25T17:10:51,903 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-25T17:10:51,903 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-25T17:10:51,903 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3771e354 to 127.0.0.1:56265 2024-11-25T17:10:51,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:51,903 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T17:10:51,903 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=572881020, stopped=false 2024-11-25T17:10:51,904 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=6579369734b6,33083,1732554473669 2024-11-25T17:10:51,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T17:10:51,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T17:10:51,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:10:51,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T17:10:51,906 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-25T17:10:51,906 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41865-0x1012ade31b40001, 
quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T17:10:51,906 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T17:10:51,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:51,907 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6579369734b6,41865,1732554474464' ***** 2024-11-25T17:10:51,907 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T17:10:51,908 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(3579): Received CLOSE for 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1224): stopping server 6579369734b6,41865,1732554474464 2024-11-25T17:10:51,908 DEBUG [RS:0;6579369734b6:41865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T17:10:51,908 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 44c7b6d5dcb77061152173d1606a877a, disabling compactions & flushes 2024-11-25T17:10:51,909 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:10:51,909 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 
after waiting 0 ms 2024-11-25T17:10:51,909 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 44c7b6d5dcb77061152173d1606a877a=hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a.} 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:10:51,909 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 44c7b6d5dcb77061152173d1606a877a 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-25T17:10:51,909 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T17:10:51,909 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T17:10:51,909 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-25T17:10:51,909 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:10:51,925 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a/.tmp/info/44e2d76a7f2d4b419bb6e3f8c69db5ac is 45, key is default/info:d/1732554479347/Put/seqid=0 2024-11-25T17:10:51,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742562_1738 (size=5037) 2024-11-25T17:10:51,930 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/info/2167cdd48193456fbdbf6354cfe690e5 is 143, key is hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a./info:regioninfo/1732554479199/Put/seqid=0 2024-11-25T17:10:51,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742563_1739 (size=7725) 2024-11-25T17:10:51,938 INFO [regionserver/6579369734b6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T17:10:52,109 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:10:52,310 DEBUG [RS:0;6579369734b6:41865 {}] 
regionserver.HRegionServer(1629): Waiting on 1588230740, 44c7b6d5dcb77061152173d1606a877a 2024-11-25T17:10:52,328 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a/.tmp/info/44e2d76a7f2d4b419bb6e3f8c69db5ac 2024-11-25T17:10:52,332 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a/.tmp/info/44e2d76a7f2d4b419bb6e3f8c69db5ac as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a/info/44e2d76a7f2d4b419bb6e3f8c69db5ac 2024-11-25T17:10:52,334 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/info/2167cdd48193456fbdbf6354cfe690e5 2024-11-25T17:10:52,334 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a/info/44e2d76a7f2d4b419bb6e3f8c69db5ac, entries=2, sequenceid=6, filesize=4.9 K 2024-11-25T17:10:52,335 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 44c7b6d5dcb77061152173d1606a877a in 426ms, sequenceid=6, compaction requested=false 2024-11-25T17:10:52,338 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/namespace/44c7b6d5dcb77061152173d1606a877a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-25T17:10:52,339 INFO [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 2024-11-25T17:10:52,339 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 44c7b6d5dcb77061152173d1606a877a: 2024-11-25T17:10:52,339 DEBUG [RS_CLOSE_REGION-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732554477889.44c7b6d5dcb77061152173d1606a877a. 
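The flush entries above and below follow the usual two-step pattern: memstore contents are first written to a temporary file under the store's .tmp directory and then committed into the store before the region reports the flush as finished. Here the flushes happen as part of region close during shutdown; for comparison, a client can request the same flush explicitly through the Admin API. A minimal sketch, with a hypothetical table name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushExample {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = connection.getAdmin()) {
                // Forces the memstores of every region of the table out to HFiles,
                // the same write-to-.tmp-then-commit sequence visible in the log.
                admin.flush(TableName.valueOf("SomeTable")); // "SomeTable" is a hypothetical name
            }
        }
    }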
2024-11-25T17:10:52,352 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/rep_barrier/160573a422d9482490d2d0e5b9653acb is 102, key is TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe./rep_barrier:/1732554506325/DeleteFamily/seqid=0
2024-11-25T17:10:52,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742564_1740 (size=6025)
2024-11-25T17:10:52,510 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-25T17:10:52,596 INFO [regionserver/6579369734b6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-25T17:10:52,596 INFO [regionserver/6579369734b6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-25T17:10:52,625 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-25T17:10:52,710 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-25T17:10:52,755 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/rep_barrier/160573a422d9482490d2d0e5b9653acb
2024-11-25T17:10:52,775 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/table/c85ca8938f754c00928710ba267c7ca0 is 96, key is TestAcidGuarantees,,1732554479533.140432b4069c8ca485d8f3971c9e31fe./table:/1732554506325/DeleteFamily/seqid=0
2024-11-25T17:10:52,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742565_1741 (size=5942)
2024-11-25T17:10:52,910 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close
2024-11-25T17:10:52,910 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-11-25T17:10:52,911 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-25T17:10:53,111 DEBUG [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-25T17:10:53,181 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/table/c85ca8938f754c00928710ba267c7ca0
2024-11-25T17:10:53,188 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/info/2167cdd48193456fbdbf6354cfe690e5 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/info/2167cdd48193456fbdbf6354cfe690e5
2024-11-25T17:10:53,191 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/info/2167cdd48193456fbdbf6354cfe690e5, entries=22, sequenceid=93, filesize=7.5 K
2024-11-25T17:10:53,192 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/rep_barrier/160573a422d9482490d2d0e5b9653acb as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/rep_barrier/160573a422d9482490d2d0e5b9653acb
2024-11-25T17:10:53,195 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/rep_barrier/160573a422d9482490d2d0e5b9653acb, entries=6, sequenceid=93, filesize=5.9 K
2024-11-25T17:10:53,196 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/.tmp/table/c85ca8938f754c00928710ba267c7ca0 as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/table/c85ca8938f754c00928710ba267c7ca0
2024-11-25T17:10:53,199 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/table/c85ca8938f754c00928710ba267c7ca0, entries=9, sequenceid=93, filesize=5.8 K
2024-11-25T17:10:53,200 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1291ms, sequenceid=93, compaction requested=false
2024-11-25T17:10:53,204 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1
2024-11-25T17:10:53,205 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-25T17:10:53,205 INFO [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-11-25T17:10:53,205 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-11-25T17:10:53,205 DEBUG [RS_CLOSE_META-regionserver/6579369734b6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-25T17:10:53,311 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1250): stopping server 6579369734b6,41865,1732554474464; all regions closed.
2024-11-25T17:10:53,319 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/WALs/6579369734b6,41865,1732554474464/6579369734b6%2C41865%2C1732554474464.meta.1732554477629.meta not finished, retry = 0
2024-11-25T17:10:53,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741834_1010 (size=26050)
2024-11-25T17:10:53,430 DEBUG [RS:0;6579369734b6:41865 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/oldWALs
2024-11-25T17:10:53,430 INFO [RS:0;6579369734b6:41865 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6579369734b6%2C41865%2C1732554474464.meta:.meta(num 1732554477629)
2024-11-25T17:10:53,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741832_1008 (size=17158874)
2024-11-25T17:10:53,469 DEBUG [RS:0;6579369734b6:41865 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/oldWALs
2024-11-25T17:10:53,470 INFO [RS:0;6579369734b6:41865 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6579369734b6%2C41865%2C1732554474464:(num 1732554476736)
2024-11-25T17:10:53,470 DEBUG [RS:0;6579369734b6:41865 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-25T17:10:53,470 INFO [RS:0;6579369734b6:41865 {}] regionserver.LeaseManager(133): Closed leases
2024-11-25T17:10:53,470 INFO [RS:0;6579369734b6:41865 {}] hbase.ChoreService(370): Chore service for: regionserver/6579369734b6:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-25T17:10:53,471 INFO [RS:0;6579369734b6:41865 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:41865
2024-11-25T17:10:53,473 INFO [regionserver/6579369734b6:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-25T17:10:53,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6579369734b6,41865,1732554474464
2024-11-25T17:10:53,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-25T17:10:53,478 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6579369734b6,41865,1732554474464]
2024-11-25T17:10:53,478 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6579369734b6,41865,1732554474464; numProcessing=1
2024-11-25T17:10:53,480 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6579369734b6,41865,1732554474464 already deleted, retry=false
2024-11-25T17:10:53,480 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6579369734b6,41865,1732554474464 expired; onlineServers=0
2024-11-25T17:10:53,480 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6579369734b6,33083,1732554473669' *****
2024-11-25T17:10:53,480 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-25T17:10:53,481 DEBUG [M:0;6579369734b6:33083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2234ae6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6579369734b6/172.17.0.3:0
2024-11-25T17:10:53,481 INFO [M:0;6579369734b6:33083 {}] regionserver.HRegionServer(1224): stopping server 6579369734b6,33083,1732554473669
2024-11-25T17:10:53,481 INFO [M:0;6579369734b6:33083 {}] regionserver.HRegionServer(1250): stopping server 6579369734b6,33083,1732554473669; all regions closed.
2024-11-25T17:10:53,481 DEBUG [M:0;6579369734b6:33083 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-25T17:10:53,481 DEBUG [M:0;6579369734b6:33083 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-25T17:10:53,481 DEBUG [M:0;6579369734b6:33083 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-25T17:10:53,481 INFO [M:0;6579369734b6:33083 {}] hbase.ChoreService(370): Chore service for: master/6579369734b6:0 had [] on shutdown
2024-11-25T17:10:53,481 DEBUG [M:0;6579369734b6:33083 {}] master.HMaster(1733): Stopping service threads
2024-11-25T17:10:53,481 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-25T17:10:53,481 INFO [M:0;6579369734b6:33083 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-25T17:10:53,482 DEBUG [master/6579369734b6:0:becomeActiveMaster-HFileCleaner.large.0-1732554476374 {}] cleaner.HFileCleaner(306): Exit Thread[master/6579369734b6:0:becomeActiveMaster-HFileCleaner.large.0-1732554476374,5,FailOnTimeoutGroup]
2024-11-25T17:10:53,482 DEBUG [master/6579369734b6:0:becomeActiveMaster-HFileCleaner.small.0-1732554476375 {}] cleaner.HFileCleaner(306): Exit Thread[master/6579369734b6:0:becomeActiveMaster-HFileCleaner.small.0-1732554476375,5,FailOnTimeoutGroup]
2024-11-25T17:10:53,482 ERROR [M:0;6579369734b6:33083 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (1308410445) connection to localhost/127.0.0.1:41117 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:41117,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup]
2024-11-25T17:10:53,483 INFO [M:0;6579369734b6:33083 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-25T17:10:53,485 DEBUG [M:0;6579369734b6:33083 {}] zookeeper.ZKUtil(347): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-25T17:10:53,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-25T17:10:53,485 WARN [M:0;6579369734b6:33083 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-25T17:10:53,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-25T17:10:53,485 INFO [M:0;6579369734b6:33083 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-25T17:10:53,485 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-25T17:10:53,485 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-25T17:10:53,486 INFO [M:0;6579369734b6:33083 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-25T17:10:53,487 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-25T17:10:53,487 INFO [M:0;6579369734b6:33083 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T17:10:53,487 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T17:10:53,487 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-25T17:10:53,487 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T17:10:53,487 INFO [M:0;6579369734b6:33083 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.70 KB heapSize=993.16 KB
2024-11-25T17:10:53,520 DEBUG [M:0;6579369734b6:33083 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5203e99ce044889809ac8efaeda95fc is 82, key is hbase:meta,,1/info:regioninfo/1732554477783/Put/seqid=0
2024-11-25T17:10:53,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742566_1742 (size=5672)
2024-11-25T17:10:53,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T17:10:53,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41865-0x1012ade31b40001, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T17:10:53,581 INFO [RS:0;6579369734b6:41865 {}] regionserver.HRegionServer(1307): Exiting; stopping=6579369734b6,41865,1732554474464; zookeeper connection closed.
2024-11-25T17:10:53,585 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a72eadc {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a72eadc
2024-11-25T17:10:53,587 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-25T17:10:53,925 INFO [M:0;6579369734b6:33083 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2317 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5203e99ce044889809ac8efaeda95fc
2024-11-25T17:10:53,985 DEBUG [M:0;6579369734b6:33083 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5f37fbd400d42049fbe37d2d0dc2eff is 2284, key is \x00\x00\x00\x00\x00\x00\x00\xA0/proc:d/1732554620586/Put/seqid=0
2024-11-25T17:10:53,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742567_1743 (size=46101)
2024-11-25T17:10:53,995 INFO [M:0;6579369734b6:33083 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=805.14 KB at sequenceid=2317 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5f37fbd400d42049fbe37d2d0dc2eff
2024-11-25T17:10:54,002 INFO [M:0;6579369734b6:33083 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d5f37fbd400d42049fbe37d2d0dc2eff
2024-11-25T17:10:54,044 DEBUG [M:0;6579369734b6:33083 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ec1c845a46ed48bfbe425621480533ca is 69, key is 6579369734b6,41865,1732554474464/rs:state/1732554476454/Put/seqid=0
2024-11-25T17:10:54,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073742568_1744 (size=5156)
2024-11-25T17:10:54,074 INFO [M:0;6579369734b6:33083 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2317 (bloomFilter=true), to=hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ec1c845a46ed48bfbe425621480533ca
2024-11-25T17:10:54,080 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b5203e99ce044889809ac8efaeda95fc as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5203e99ce044889809ac8efaeda95fc
2024-11-25T17:10:54,087 INFO [M:0;6579369734b6:33083 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b5203e99ce044889809ac8efaeda95fc, entries=8, sequenceid=2317, filesize=5.5 K
2024-11-25T17:10:54,088 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5f37fbd400d42049fbe37d2d0dc2eff as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5f37fbd400d42049fbe37d2d0dc2eff
2024-11-25T17:10:54,093 INFO [M:0;6579369734b6:33083 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d5f37fbd400d42049fbe37d2d0dc2eff
2024-11-25T17:10:54,094 INFO [M:0;6579369734b6:33083 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5f37fbd400d42049fbe37d2d0dc2eff, entries=185, sequenceid=2317, filesize=45.0 K
2024-11-25T17:10:54,094 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ec1c845a46ed48bfbe425621480533ca as hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ec1c845a46ed48bfbe425621480533ca
2024-11-25T17:10:54,098 INFO [M:0;6579369734b6:33083 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41117/user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ec1c845a46ed48bfbe425621480533ca, entries=1, sequenceid=2317, filesize=5.0 K
2024-11-25T17:10:54,099 INFO [M:0;6579369734b6:33083 {}] regionserver.HRegion(3040): Finished flush of dataSize ~805.70 KB/825034, heapSize ~992.86 KB/1016688, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 612ms, sequenceid=2317, compaction requested=false
2024-11-25T17:10:54,146 INFO [M:0;6579369734b6:33083 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T17:10:54,146 DEBUG [M:0;6579369734b6:33083 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-25T17:10:54,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33067 is added to blk_1073741830_1006 (size=976902)
2024-11-25T17:10:54,179 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/ffd33fde-3807-89b9-127b-25761d7814f4/MasterData/WALs/6579369734b6,33083,1732554473669/6579369734b6%2C33083%2C1732554473669.1732554475777 not finished, retry = 0
2024-11-25T17:10:54,193 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-25T17:10:54,193 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-25T17:10:54,194 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-25T17:10:54,194 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-11-25T17:10:54,195 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-11-25T17:10:54,280 INFO [M:0;6579369734b6:33083 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-25T17:10:54,280 INFO [M:0;6579369734b6:33083 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:33083
2024-11-25T17:10:54,280 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-25T17:10:54,284 DEBUG [M:0;6579369734b6:33083 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/6579369734b6,33083,1732554473669 already deleted, retry=false
2024-11-25T17:10:54,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T17:10:54,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33083-0x1012ade31b40000, quorum=127.0.0.1:56265, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T17:10:54,388 INFO [M:0;6579369734b6:33083 {}] regionserver.HRegionServer(1307): Exiting; stopping=6579369734b6,33083,1732554473669; zookeeper connection closed.
2024-11-25T17:10:54,416 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10ba49e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T17:10:54,419 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T17:10:54,419 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T17:10:54,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T17:10:54,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/hadoop.log.dir/,STOPPED}
2024-11-25T17:10:54,424 WARN [BP-896136284-172.17.0.3-1732554469894 heartbeating to localhost/127.0.0.1:41117 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-25T17:10:54,424 WARN [BP-896136284-172.17.0.3-1732554469894 heartbeating to localhost/127.0.0.1:41117 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-896136284-172.17.0.3-1732554469894 (Datanode Uuid 2e3cadf8-29ef-4329-ac23-cf3711913468) service to localhost/127.0.0.1:41117
2024-11-25T17:10:54,425 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-25T17:10:54,425 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-25T17:10:54,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/dfs/data/data1/current/BP-896136284-172.17.0.3-1732554469894 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T17:10:54,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/cluster_d0c44768-23d8-26f7-d2cc-a3902272cb55/dfs/data/data2/current/BP-896136284-172.17.0.3-1732554469894 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T17:10:54,427 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-25T17:10:54,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-25T17:10:54,439 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T17:10:54,439 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T17:10:54,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T17:10:54,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/341b9ab2-abf1-4651-1d4a-4f392103e208/hadoop.log.dir/,STOPPED}
2024-11-25T17:10:54,467 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-25T17:10:54,790 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down